/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "perf.h"

#include "util/build-id.h"
#include "util/util.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/config.h"

#include "util/callchain.h"
#include "util/cgroup.h"
#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/drv_configs.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"
#include "util/perf_regs.h"
#include "util/auxtrace.h"
#include "util/tsc.h"
#include "util/parse-branch-options.h"
#include "util/parse-regs-options.h"
#include "util/llvm-utils.h"
#include "util/bpf-loader.h"
#include "util/trigger.h"
#include "util/perf-hooks.h"
#include "asm/bug.h"

#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>
#include <asm/bug.h>
#include <linux/time64.h>

struct switch_output {
	bool		 enabled;
	bool		 signal;
	unsigned long	 size;
	const char	*str;
	bool		 set;
};

struct record {
	struct perf_tool	tool;
	struct record_opts	opts;
	u64			bytes_written;
	struct perf_data_file	file;
	struct auxtrace_record	*itr;
	struct perf_evlist	*evlist;
	struct perf_session	*session;
	const char		*progname;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_set;
	bool			no_buildid_cache;
	bool			no_buildid_cache_set;
	bool			buildid_all;
	bool			timestamp_filename;
	struct switch_output	switch_output;
	unsigned long long	samples;
};

static volatile int auxtrace_record__snapshot_started;
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
static DEFINE_TRIGGER(switch_output_trigger);

static bool switch_output_signal(struct record *rec)
{
	return rec->switch_output.signal &&
	       trigger_is_ready(&switch_output_trigger);
}

static bool switch_output_size(struct record *rec)
{
	return rec->switch_output.size &&
	       trigger_is_ready(&switch_output_trigger) &&
	       (rec->bytes_written >= rec->switch_output.size);
}

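/*
 * Append @size bytes from @bf to the perf.data output and, once the
 * configured --switch-output size threshold has been crossed, mark the
 * switch-output trigger as hit.
 */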
static int record__write(struct record *rec, void *bf, size_t size)
{
	if (perf_data_file__write(rec->session->file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	rec->bytes_written += size;

	if (switch_output_size(rec))
		trigger_hit(&switch_output_trigger);

	return 0;
}

static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, event, event->header.size);
}

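/*
 * Overwrite (backward) ring buffers have no tail pointer to rely on, so the
 * valid region is located by walking event headers forward from @head until
 * either a zero-sized header is seen or a whole buffer length has been
 * covered (i.e. the buffer wrapped).
 */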
static int
backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
{
	struct perf_event_header *pheader;
	u64 evt_head = head;
	int size = mask + 1;

	pr_debug2("backward_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);
	pheader = (struct perf_event_header *)(buf + (head & mask));
	*start = head;
	while (true) {
		if (evt_head - head >= (unsigned int)size) {
			pr_debug("Finished reading backward ring buffer: rewind\n");
			if (evt_head - head > (unsigned int)size)
				evt_head -= pheader->size;
			*end = evt_head;
			return 0;
		}

		pheader = (struct perf_event_header *)(buf + (evt_head & mask));

		if (pheader->size == 0) {
			pr_debug("Finished reading backward ring buffer: get start\n");
			*end = evt_head;
			return 0;
		}

		evt_head += pheader->size;
		pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
	}
	WARN_ONCE(1, "Shouldn't get here\n");
	return -1;
}

static int
rb_find_range(void *data, int mask, u64 head, u64 old,
	      u64 *start, u64 *end, bool backward)
{
	if (!backward) {
		*start = old;
		*end = head;
		return 0;
	}

	return backward_rb_find_range(data, mask, head, start, end);
}

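/*
 * Copy the data that accumulated in one mmap'd ring buffer to the perf.data
 * output, splitting the write in two when the region wraps around the end of
 * the buffer.
 */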
static int
record__mmap_read(struct record *rec, struct perf_mmap *md,
		  bool overwrite, bool backward)
{
	u64 head = perf_mmap__read_head(md);
	u64 old = md->prev;
	u64 end = head, start = old;
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	if (rb_find_range(data, md->mask, head,
			  old, &start, &end, backward))
		return -1;

	if (start == end)
		return 0;

	rec->samples++;

	size = end - start;
	if (size > (unsigned long)(md->mask) + 1) {
		WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");

		md->prev = head;
		perf_mmap__consume(md, overwrite || backward);
		return 0;
	}

	if ((start & md->mask) + size != (end & md->mask)) {
		buf = &data[start & md->mask];
		size = md->mask + 1 - (start & md->mask);
		start += size;

		if (record__write(rec, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[start & md->mask];
	size = end - start;
	start += size;

	if (record__write(rec, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = head;
	perf_mmap__consume(md, overwrite || backward);
out:
	return rc;
}

static volatile int done;
static volatile int signr = -1;
static volatile int child_finished;

static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
}

static void sigsegv_handler(int sig)
{
	perf_hooks__recover();
	sighandler_dump_stack(sig);
}

static void record__sig_exit(void)
{
	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	raise(signr);
}

#ifdef HAVE_AUXTRACE_SUPPORT

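/*
 * Callback used by the auxtrace mmap readers: write the PERF_RECORD_AUXTRACE
 * event followed by the AUX area data (possibly in two chunks) and pad the
 * total to an 8-byte boundary.  For non-pipe output the event's file offset
 * is also recorded in the auxtrace index.
 */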
static int record__process_auxtrace(struct perf_tool *tool,
				    union perf_event *event, void *data1,
				    size_t len1, void *data2, size_t len2)
{
	struct record *rec = container_of(tool, struct record, tool);
	struct perf_data_file *file = &rec->file;
	size_t padding;
	u8 pad[8] = {0};

	if (!perf_data_file__is_pipe(file)) {
		off_t file_offset;
		int fd = perf_data_file__fd(file);
		int err;

		file_offset = lseek(fd, 0, SEEK_CUR);
		if (file_offset == -1)
			return -1;
		err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
						     event, file_offset);
		if (err)
			return err;
	}

	/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
	padding = (len1 + len2) & 7;
	if (padding)
		padding = 8 - padding;

	record__write(rec, event, event->header.size);
	record__write(rec, data1, len1);
	if (len2)
		record__write(rec, data2, len2);
	record__write(rec, &pad, padding);

	return 0;
}

static int record__auxtrace_mmap_read(struct record *rec,
				      struct auxtrace_mmap *mm)
{
	int ret;

	ret = auxtrace_mmap__read(mm, rec->itr, &rec->tool,
				  record__process_auxtrace);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_mmap_read_snapshot(struct record *rec,
					       struct auxtrace_mmap *mm)
{
	int ret;

	ret = auxtrace_mmap__read_snapshot(mm, rec->itr, &rec->tool,
					   record__process_auxtrace,
					   rec->opts.auxtrace_snapshot_size);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_read_snapshot_all(struct record *rec)
{
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		struct auxtrace_mmap *mm =
				&rec->evlist->mmap[i].auxtrace_mmap;

		if (!mm->base)
			continue;

		if (record__auxtrace_mmap_read_snapshot(rec, mm) != 0) {
			rc = -1;
			goto out;
		}
	}
out:
	return rc;
}

static void record__read_auxtrace_snapshot(struct record *rec)
{
	pr_debug("Recording AUX area tracing snapshot\n");
	if (record__auxtrace_read_snapshot_all(rec) < 0) {
		trigger_error(&auxtrace_snapshot_trigger);
	} else {
		if (auxtrace_record__snapshot_finish(rec->itr))
			trigger_error(&auxtrace_snapshot_trigger);
		else
			trigger_ready(&auxtrace_snapshot_trigger);
	}
}

#else

static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
			       struct auxtrace_mmap *mm __maybe_unused)
{
	return 0;
}

static inline
void record__read_auxtrace_snapshot(struct record *rec __maybe_unused)
{
}

static inline
int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
{
	return 0;
}

#endif

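/*
 * mmap the per-CPU/per-thread ring buffers (and the AUX area buffers, if
 * requested), translating an EPERM from the kernel into a hint about
 * perf_event_mlock_kb.
 */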
static int record__mmap_evlist(struct record *rec,
			       struct perf_evlist *evlist)
{
	struct record_opts *opts = &rec->opts;
	char msg[512];

	if (perf_evlist__mmap_ex(evlist, opts->mmap_pages, false,
				 opts->auxtrace_mmap_pages,
				 opts->auxtrace_snapshot_mode) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u,%u)\n",
			       opts->mmap_pages, opts->auxtrace_mmap_pages);
			return -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno,
			       str_error_r(errno, msg, sizeof(msg)));
			if (errno)
				return -errno;
			else
				return -EINVAL;
		}
	}
	return 0;
}

static int record__mmap(struct record *rec)
{
	return record__mmap_evlist(rec, rec->evlist);
}

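/*
 * Create the events in the evlist, applying fallbacks, event filters and
 * driver configs, and mmap the resulting ring buffers.
 */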
static int record__open(struct record *rec)
{
	char msg[512];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	struct perf_evsel_config_term *err_term;
	int rc = 0;

	perf_evlist__config(evlist, opts, &callchain_param);

	evlist__for_each_entry(evlist, pos) {
try_again:
		if (perf_evsel__open(pos, pos->cpus, pos->threads) < 0) {
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}
	}

	if (perf_evlist__apply_filters(evlist, &pos)) {
		error("failed to set filter \"%s\" on event %s with %d (%s)\n",
		      pos->filter, perf_evsel__name(pos), errno,
		      str_error_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	if (perf_evlist__apply_drv_configs(evlist, &pos, &err_term)) {
		error("failed to set config \"%s\" on event %s with %d (%s)\n",
		      err_term->val.drv_cfg, perf_evsel__name(pos), errno,
		      str_error_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	rc = record__mmap(rec);
	if (rc)
		goto out;

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	struct record *rec = container_of(tool, struct record, tool);

	rec->samples++;

	return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
}

static int process_buildids(struct record *rec)
{
	struct perf_data_file *file  = &rec->file;
	struct perf_session *session = rec->session;

	if (file->size == 0)
		return 0;

	/*
	 * During this pass the kernel map is loaded and dso->long_name is
	 * replaced with the real pathname that was found.  In that case we
	 * prefer the vmlinux path, e.g.
	 *   /lib/modules/3.16.4/build/vmlinux
	 *
	 * over the build-id path (in the debug directory), e.g.
	 *   $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
	 */
	symbol_conf.ignore_vmlinux_buildid = true;

	/*
	 * If --buildid-all is given, it marks all DSOs regardless of hits,
	 * so there is no need to process samples.
	 */
	if (rec->buildid_all)
		rec->tool.sample = NULL;

	return perf_session__process_events(session);
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * As we process the 'record' and 'report' subcommands for a guest
	 * kernel, we arrange the module mmaps prior to the guest kernel mmap
	 * and trigger a DSO preload, because the default guest module symbols
	 * are loaded from guest kallsyms instead of /lib/modules/XXX/XXX.
	 * This avoids missing symbols when the first address is in a module
	 * rather than in the guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for the guest kernel because the guest kernel's
	 * /proc/kallsyms sometimes has no _text.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

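/*
 * Flush either the normal or the backward (overwrite) ring buffers of the
 * evlist to the output file, followed by a PERF_RECORD_FINISHED_ROUND marker
 * if anything was written.
 */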
static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
				    bool backward)
{
	u64 bytes_written = rec->bytes_written;
	int i;
	int rc = 0;
	struct perf_mmap *maps;

	if (!evlist)
		return 0;

	maps = backward ? evlist->backward_mmap : evlist->mmap;
	if (!maps)
		return 0;

	if (backward && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
		return 0;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		struct auxtrace_mmap *mm = &maps[i].auxtrace_mmap;

		if (maps[i].base) {
			if (record__mmap_read(rec, &maps[i],
					      evlist->overwrite, backward) != 0) {
				rc = -1;
				goto out;
			}
		}

		if (mm->base && !rec->opts.auxtrace_snapshot_mode &&
		    record__auxtrace_mmap_read(rec, mm) != 0) {
			rc = -1;
			goto out;
		}
	}

	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));

	if (backward)
		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
out:
	return rc;
}

static int record__mmap_read_all(struct record *rec)
{
	int err;

	err = record__mmap_read_evlist(rec, rec->evlist, false);
	if (err)
		return err;

	return record__mmap_read_evlist(rec, rec->evlist, true);
}

static void record__init_features(struct record *rec)
{
	struct perf_session *session = rec->session;
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&rec->evlist->entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);

	if (!rec->opts.full_auxtrace)
		perf_header__clear_feat(&session->header, HEADER_AUXTRACE);

	perf_header__clear_feat(&session->header, HEADER_STAT);
}

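/*
 * Finalize a non-pipe output file: account the data size, post-process
 * build-ids and rewrite the header.
 */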
static void
record__finish_output(struct record *rec)
{
	struct perf_data_file *file = &rec->file;
	int fd = perf_data_file__fd(file);

	if (file->is_pipe)
		return;

	rec->session->header.data_size += rec->bytes_written;
	file->size = lseek(perf_data_file__fd(file), 0, SEEK_CUR);

	if (!rec->no_buildid) {
		process_buildids(rec);

		if (rec->buildid_all)
			dsos__hit_all(rec->session);
	}
	perf_session__write_header(rec->session, rec->evlist, fd, true);

	return;
}

static int record__synthesize_workload(struct record *rec, bool tail)
{
	struct {
		struct thread_map map;
		struct thread_map_data map_data;
	} thread_map;

	if (rec->opts.tail_synthesize != tail)
		return 0;

	thread_map.map.nr = 1;
	thread_map.map.map[0].pid = rec->evlist->workload.pid;
	thread_map.map.map[0].comm = NULL;
	return perf_event__synthesize_thread_map(&rec->tool, &thread_map.map,
						 process_synthesized_event,
						 &rec->session->machines.host,
						 rec->opts.sample_address,
						 rec->opts.proc_map_timeout);
}

static int record__synthesize(struct record *rec, bool tail);

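/*
 * Rotate the output file: synthesize the tail events, finalize the current
 * file and move it aside under a timestamped name, then (unless we are
 * exiting) continue recording into a fresh data file.
 */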
static int
record__switch_output(struct record *rec, bool at_exit)
{
	struct perf_data_file *file = &rec->file;
	int fd, err;

	/* Same Size: "2015122520103046" */
	char timestamp[] = "InvalidTimestamp";

	record__synthesize(rec, true);
	if (target__none(&rec->opts.target))
		record__synthesize_workload(rec, true);

	rec->samples = 0;
	record__finish_output(rec);
	err = fetch_current_timestamp(timestamp, sizeof(timestamp));
	if (err) {
		pr_err("Failed to get current timestamp\n");
		return -EINVAL;
	}

	fd = perf_data_file__switch(file, timestamp,
				    rec->session->header.data_offset,
				    at_exit);
	if (fd >= 0 && !at_exit) {
		rec->bytes_written = 0;
		rec->session->header.data_size = 0;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
			file->path, timestamp);

	/* Output tracking events */
	if (!at_exit) {
		record__synthesize(rec, false);

		/*
		 * In 'perf record --switch-output' without -a,
		 * record__synthesize() in record__switch_output() won't
		 * generate tracking events because there is no thread_map
		 * in the evlist, so a newly created perf.data would not
		 * contain map and comm information.
		 * Create a fake thread_map and call
		 * perf_event__synthesize_thread_map() directly for those events.
		 */
		if (target__none(&rec->opts.target))
			record__synthesize_workload(rec, false);
	}
	return fd;
}

static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}

static void snapshot_sig_handler(int sig);

int __weak
perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
			    struct perf_tool *tool __maybe_unused,
			    perf_event__handler_t process __maybe_unused,
			    struct machine *machine __maybe_unused)
{
	return 0;
}

static const struct perf_event_mmap_page *
perf_evlist__pick_pc(struct perf_evlist *evlist)
{
	if (evlist) {
		if (evlist->mmap && evlist->mmap[0].base)
			return evlist->mmap[0].base;
		if (evlist->backward_mmap && evlist->backward_mmap[0].base)
			return evlist->backward_mmap[0].base;
	}
	return NULL;
}

static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
{
	const struct perf_event_mmap_page *pc;

	pc = perf_evlist__pick_pc(rec->evlist);
	if (pc)
		return pc;
	return NULL;
}

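/*
 * Emit the synthetic events that describe the session: event attributes and
 * tracing data (pipe mode), time conversion and AUX trace info, kernel and
 * module mmaps, guest machines and the existing threads of the target.
 */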
static int record__synthesize(struct record *rec, bool tail)
{
	struct perf_session *session = rec->session;
	struct machine *machine = &session->machines.host;
	struct perf_data_file *file = &rec->file;
	struct record_opts *opts = &rec->opts;
	struct perf_tool *tool = &rec->tool;
	int fd = perf_data_file__fd(file);
	int err = 0;

	if (rec->opts.tail_synthesize != tail)
		return 0;

	if (file->is_pipe) {
		err = perf_event__synthesize_attrs(tool, session,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out;
		}

		if (have_tracepoints(&rec->evlist->entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints, so it's not really
			 * an error, just that we don't need to
			 * synthesize anything.  We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out;
			}
			rec->bytes_written += err;
		}
	}

	err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
					  process_synthesized_event, machine);
	if (err)
		goto out;

	if (rec->opts.full_auxtrace) {
		err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
					session, process_synthesized_event);
		if (err)
			goto out;
	}

	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
			   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
			   "Check /proc/kallsyms permission or run as root.\n");

	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
			   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
			   "Check /proc/modules permission or run as root.\n");

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
					    process_synthesized_event, opts->sample_address,
					    opts->proc_map_timeout);
out:
	return err;
}

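/*
 * The main recording loop: set up signals, the session and the output header,
 * fork the workload if one was given, then keep draining the ring buffers
 * until the workload exits or the user interrupts us, handling AUX area
 * snapshots and --switch-output rotation along the way.
 */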
static int __cmd_record(struct record *rec, int argc, const char **argv)
{
	int err;
	int status = 0;
	unsigned long waking = 0;
	const bool forks = argc > 0;
	struct machine *machine;
	struct perf_tool *tool = &rec->tool;
	struct record_opts *opts = &rec->opts;
	struct perf_data_file *file = &rec->file;
	struct perf_session *session;
	bool disabled = false, draining = false;
	int fd;

	rec->progname = argv[0];

	atexit(record__sig_exit);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGTERM, sig_handler);
	signal(SIGSEGV, sigsegv_handler);

	if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
		signal(SIGUSR2, snapshot_sig_handler);
		if (rec->opts.auxtrace_snapshot_mode)
			trigger_on(&auxtrace_snapshot_trigger);
		if (rec->switch_output.enabled)
			trigger_on(&switch_output_trigger);
	} else {
		signal(SIGUSR2, SIG_IGN);
	}

	session = perf_session__new(file, false, tool);
	if (session == NULL) {
		pr_err("Perf session creation failed.\n");
		return -1;
	}

	fd = perf_data_file__fd(file);
	rec->session = session;

	record__init_features(rec);

	if (forks) {
		err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
						    argv, file->is_pipe,
						    workload_exec_failed_signal);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			status = err;
			goto out_delete_session;
		}
	}

	if (record__open(rec) != 0) {
		err = -1;
		goto out_child;
	}

	err = bpf__apply_obj_config();
	if (err) {
		char errbuf[BUFSIZ];

		bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
		pr_err("ERROR: Apply config to BPF failed: %s\n",
		       errbuf);
		goto out_child;
	}

	/*
	 * Normally perf_session__new would do this, but it doesn't have the
	 * evlist.
	 */
	if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
		pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
		rec->tool.ordered_events = false;
	}

	if (!rec->evlist->nr_groups)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

	if (file->is_pipe) {
		err = perf_header__write_pipe(fd);
		if (err < 0)
			goto out_child;
	} else {
		err = perf_session__write_header(session, rec->evlist, fd, false);
		if (err < 0)
			goto out_child;
	}

	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		err = -1;
		goto out_child;
	}

	machine = &session->machines.host;

	err = record__synthesize(rec, false);
	if (err < 0)
		goto out_child;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_child;
		}
	}

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!target__none(&opts->target) && !opts->initial_delay)
		perf_evlist__enable(rec->evlist);

	/*
	 * Let the child rip
	 */
	if (forks) {
		union perf_event *event;

		event = malloc(sizeof(event->comm) + machine->id_hdr_size);
		if (event == NULL) {
			err = -ENOMEM;
			goto out_child;
		}

		/*
		 * Some H/W events are generated before COMM event
		 * which is emitted during exec(), so perf script
		 * cannot see a correct process name for those events.
		 * Synthesize COMM event to prevent it.
		 */
		perf_event__synthesize_comm(tool, event,
					    rec->evlist->workload.pid,
					    process_synthesized_event,
					    machine);
		free(event);

		perf_evlist__start_workload(rec->evlist);
	}

	if (opts->initial_delay) {
		usleep(opts->initial_delay * USEC_PER_MSEC);
		perf_evlist__enable(rec->evlist);
	}

	trigger_ready(&auxtrace_snapshot_trigger);
	trigger_ready(&switch_output_trigger);
	perf_hooks__invoke_record_start();
	for (;;) {
		unsigned long long hits = rec->samples;

		/*
		 * rec->evlist->bkw_mmap_state may already be BKW_MMAP_EMPTY
		 * here: when done == true and hits != rec->samples in the
		 * previous round.
		 *
		 * perf_evlist__toggle_bkw_mmap() ensures we never convert
		 * BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
		 */
		if (trigger_is_hit(&switch_output_trigger) || done || draining)
			perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);

		if (record__mmap_read_all(rec) < 0) {
			trigger_error(&auxtrace_snapshot_trigger);
			trigger_error(&switch_output_trigger);
			err = -1;
			goto out_child;
		}

		if (auxtrace_record__snapshot_started) {
			auxtrace_record__snapshot_started = 0;
			if (!trigger_is_error(&auxtrace_snapshot_trigger))
				record__read_auxtrace_snapshot(rec);
			if (trigger_is_error(&auxtrace_snapshot_trigger)) {
				pr_err("AUX area tracing snapshot failed\n");
				err = -1;
				goto out_child;
			}
		}

		if (trigger_is_hit(&switch_output_trigger)) {
			/*
			 * If switch_output_trigger is hit, the data in the
			 * overwritable ring buffer should have been collected,
			 * so bkw_mmap_state should be set to BKW_MMAP_EMPTY.
			 *
			 * If SIGUSR2 is raised after or during record__mmap_read_all(),
			 * record__mmap_read_all() didn't collect data from the
			 * overwritable ring buffer.  Read again.
			 */
			if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
				continue;
			trigger_ready(&switch_output_trigger);

			/*
			 * Reenable events in overwrite ring buffer after
			 * record__mmap_read_all(): we should have collected
			 * data from it.
			 */
			perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);

			if (!quiet)
				fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
					waking);
			waking = 0;
			fd = record__switch_output(rec, false);
			if (fd < 0) {
				pr_err("Failed to switch to new file\n");
				trigger_error(&switch_output_trigger);
				err = fd;
				goto out_child;
			}
		}

		if (hits == rec->samples) {
			if (done || draining)
				break;
			err = perf_evlist__poll(rec->evlist, -1);
			/*
			 * Propagate error, only if there's any. Ignore positive
			 * number of returned events and interrupt error.
			 */
			if (err > 0 || (err < 0 && errno == EINTR))
				err = 0;
			waking++;

			if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
				draining = true;
		}

		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
		if (done && !disabled && !target__none(&opts->target)) {
			trigger_off(&auxtrace_snapshot_trigger);
			perf_evlist__disable(rec->evlist);
			disabled = true;
		}
	}
	trigger_off(&auxtrace_snapshot_trigger);
	trigger_off(&switch_output_trigger);

	if (forks && workload_exec_errno) {
		char msg[STRERR_BUFSIZE];
		const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
		pr_err("Workload failed: %s\n", emsg);
		err = -1;
		goto out_child;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

	if (target__none(&rec->opts.target))
		record__synthesize_workload(rec, true);

out_child:
	if (forks) {
		int exit_status;

		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&exit_status);

		if (err < 0)
			status = err;
		else if (WIFEXITED(exit_status))
			status = WEXITSTATUS(exit_status);
		else if (WIFSIGNALED(exit_status))
			signr = WTERMSIG(exit_status);
	} else
		status = err;

	record__synthesize(rec, true);
	/* this will be recalculated during process_buildids() */
	rec->samples = 0;

	if (!err) {
		if (!rec->timestamp_filename) {
			record__finish_output(rec);
		} else {
			fd = record__switch_output(rec, true);
			if (fd < 0) {
				status = fd;
				goto out_delete_session;
			}
		}
	}

	perf_hooks__invoke_record_end();

	if (!err && !quiet) {
		char samples[128];
		const char *postfix = rec->timestamp_filename ?
					".<timestamp>" : "";

		if (rec->samples && !rec->opts.full_auxtrace)
			scnprintf(samples, sizeof(samples),
				  " (%" PRIu64 " samples)", rec->samples);
		else
			samples[0] = '\0';

		fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s ]\n",
			perf_data_file__size(file) / 1024.0 / 1024.0,
			file->path, postfix, samples);
	}

out_delete_session:
	perf_session__delete(session);
	return status;
}

static void callchain_debug(struct callchain_param *callchain)
{
	static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };

	pr_debug("callchain: type %s\n", str[callchain->record_mode]);

	if (callchain->record_mode == CALLCHAIN_DWARF)
		pr_debug("callchain: stack dump size %d\n",
			 callchain->dump_size);
}

int record_opts__parse_callchain(struct record_opts *record,
				 struct callchain_param *callchain,
				 const char *arg, bool unset)
{
	int ret;
	callchain->enabled = !unset;

	/* --no-call-graph */
	if (unset) {
		callchain->record_mode = CALLCHAIN_NONE;
		pr_debug("callchain: disabled\n");
		return 0;
	}

	ret = parse_callchain_record_opt(arg, callchain);
	if (!ret) {
		/* Enable data address sampling for DWARF unwind. */
		if (callchain->record_mode == CALLCHAIN_DWARF)
			record->sample_address = true;
		callchain_debug(callchain);
	}

	return ret;
}

int record_parse_callchain_opt(const struct option *opt,
			       const char *arg,
			       int unset)
{
	return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
}

int record_callchain_opt(const struct option *opt,
			 const char *arg __maybe_unused,
			 int unset __maybe_unused)
{
	struct callchain_param *callchain = opt->value;

	callchain->enabled = true;

	if (callchain->record_mode == CALLCHAIN_NONE)
		callchain->record_mode = CALLCHAIN_FP;

	callchain_debug(callchain);
	return 0;
}

static int perf_record_config(const char *var, const char *value, void *cb)
{
	struct record *rec = cb;

	if (!strcmp(var, "record.build-id")) {
		if (!strcmp(value, "cache"))
			rec->no_buildid_cache = false;
		else if (!strcmp(value, "no-cache"))
			rec->no_buildid_cache = true;
		else if (!strcmp(value, "skip"))
			rec->no_buildid = true;
		else
			return -1;
		return 0;
	}
	if (!strcmp(var, "record.call-graph"))
		var = "call-graph.record-mode"; /* fall-through */

	return perf_default_config(var, value, cb);
}

struct clockid_map {
	const char *name;
	int clockid;
};

#define CLOCKID_MAP(n, c)	\
	{ .name = n, .clockid = (c), }

#define CLOCKID_END	{ .name = NULL, }


/*
 * Add the missing ones, we need to build on many distros...
 */
#ifndef CLOCK_MONOTONIC_RAW
#define CLOCK_MONOTONIC_RAW 4
#endif
#ifndef CLOCK_BOOTTIME
#define CLOCK_BOOTTIME 7
#endif
#ifndef CLOCK_TAI
#define CLOCK_TAI 11
#endif

static const struct clockid_map clockids[] = {
	/* available for all events, NMI safe */
	CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
	CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),

	/* available for some events */
	CLOCKID_MAP("realtime", CLOCK_REALTIME),
	CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
	CLOCKID_MAP("tai", CLOCK_TAI),

	/* available for the lazy */
	CLOCKID_MAP("mono", CLOCK_MONOTONIC),
	CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
	CLOCKID_MAP("real", CLOCK_REALTIME),
	CLOCKID_MAP("boot", CLOCK_BOOTTIME),

	CLOCKID_END,
};

static int parse_clockid(const struct option *opt, const char *str, int unset)
{
	struct record_opts *opts = (struct record_opts *)opt->value;
	const struct clockid_map *cm;
	const char *ostr = str;

	if (unset) {
		opts->use_clockid = 0;
		return 0;
	}

	/* no arg passed */
	if (!str)
		return 0;

	/* no setting it twice */
	if (opts->use_clockid)
		return -1;

	opts->use_clockid = true;

1317 /* if it's a number, we're done */
1318 if (sscanf(str, "%d", &opts->clockid) == 1)
1319 return 0;
1320
1321 /* allow a "CLOCK_" prefix to the name */
1322 if (!strncasecmp(str, "CLOCK_", 6))
1323 str += 6;
1324
1325 for (cm = clockids; cm->name; cm++) {
1326 if (!strcasecmp(str, cm->name)) {
1327 opts->clockid = cm->clockid;
1328 return 0;
1329 }
1330 }
1331
1332 opts->use_clockid = false;
1333 ui__warning("unknown clockid %s, check man page\n", ostr);
1334 return -1;
1335}
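
/*
 * Illustrative usage sketch for parse_clockid() above: '-k/--clockid' should
 * accept either a name from the clockids[] table or a raw clockid number,
 * case-insensitively and with an optional "CLOCK_" prefix, e.g.
 *
 *   perf record -k monotonic_raw ...
 *   perf record -k CLOCK_BOOTTIME ...
 *   perf record -k 4 ...
 *
 * Passing the option twice, or an unknown name, makes the parser fail with -1.
 */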
1336
Adrian Huntere9db1312015-04-09 18:53:46 +03001337static int record__parse_mmap_pages(const struct option *opt,
1338 const char *str,
1339 int unset __maybe_unused)
1340{
1341 struct record_opts *opts = opt->value;
1342 char *s, *p;
1343 unsigned int mmap_pages;
1344 int ret;
1345
1346 if (!str)
1347 return -EINVAL;
1348
1349 s = strdup(str);
1350 if (!s)
1351 return -ENOMEM;
1352
1353 p = strchr(s, ',');
1354 if (p)
1355 *p = '\0';
1356
1357 if (*s) {
1358 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
1359 if (ret)
1360 goto out_free;
1361 opts->mmap_pages = mmap_pages;
1362 }
1363
1364 if (!p) {
1365 ret = 0;
1366 goto out_free;
1367 }
1368
1369 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
1370 if (ret)
1371 goto out_free;
1372
1373 opts->auxtrace_mmap_pages = mmap_pages;
1374
1375out_free:
1376 free(s);
1377 return ret;
1378}
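
/*
 * Illustrative usage sketch for the '-m/--mmap-pages' callback above: the
 * argument is split on ',' into data and AUX area tracing mmap sizes, e.g.
 *
 *   perf record -m 512 ...        # 512 mmap data pages
 *   perf record -m 512,1024 ...   # plus 1024 AUX area mmap pages
 *   perf record -m ,1024 ...      # AUX area size only
 *
 * Each part goes through __perf_evlist__parse_mmap_pages(), so size suffixes
 * accepted there (e.g. "128K") should work for both fields.
 */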
1379
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001380static int switch_output_setup(struct record *rec)
1381{
1382 struct switch_output *s = &rec->switch_output;
Jiri Olsadc0c6122017-01-09 10:51:58 +01001383 static struct parse_tag tags_size[] = {
1384 { .tag = 'B', .mult = 1 },
1385 { .tag = 'K', .mult = 1 << 10 },
1386 { .tag = 'M', .mult = 1 << 20 },
1387 { .tag = 'G', .mult = 1 << 30 },
1388 { .tag = 0 },
1389 };
1390 unsigned long val;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001391
1392 if (!s->set)
1393 return 0;
1394
1395 if (!strcmp(s->str, "signal")) {
1396 s->signal = true;
1397 pr_debug("switch-output with SIGUSR2 signal\n");
Jiri Olsadc0c6122017-01-09 10:51:58 +01001398 goto enabled;
1399 }
1400
1401 val = parse_tag_value(s->str, tags_size);
1402 if (val != (unsigned long) -1) {
1403 s->size = val;
1404 pr_debug("switch-output with %s size threshold\n", s->str);
1405 goto enabled;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001406 }
1407
1408 return -1;
Jiri Olsadc0c6122017-01-09 10:51:58 +01001409
1410enabled:
1411 rec->timestamp_filename = true;
1412 s->enabled = true;
1413 return 0;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001414}
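
/*
 * Illustrative usage sketch for switch_output_setup() above: the option value
 * is either the literal "signal" or a size with a B/K/M/G suffix, e.g.
 *
 *   perf record --switch-output ...        # defaults to "signal" (SIGUSR2)
 *   perf record --switch-output=100M ...   # switch output past ~100 MB
 *
 * Anything else fails the setup and the caller prints the usage text; both
 * accepted forms imply timestamped output file names.
 */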
1415
Namhyung Kime5b2c202014-10-23 00:15:46 +09001416static const char * const __record_usage[] = {
Mike Galbraith9e0967532009-05-28 16:25:34 +02001417 "perf record [<options>] [<command>]",
1418 "perf record [<options>] -- <command> [<options>]",
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001419 NULL
1420};
Namhyung Kime5b2c202014-10-23 00:15:46 +09001421const char * const *record_usage = __record_usage;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001422
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001423/*
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001424 * XXX Ideally this would be local to cmd_record() and passed to a record__new
1425 * because we need to have access to it in record__exit, which is called
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001426 * after cmd_record() exits, but since record_options need to be accessible to
1427 * builtin-script, leave it here.
1428 *
1429 * At least we don't touch it directly in all the other functions here.
1430 *
1431 * Just say no to tons of global variables, sigh.
1432 */
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001433static struct record record = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001434 .opts = {
Andi Kleen8affc2b2014-07-31 14:45:04 +08001435 .sample_time = true,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001436 .mmap_pages = UINT_MAX,
1437 .user_freq = UINT_MAX,
1438 .user_interval = ULLONG_MAX,
Arnaldo Carvalho de Melo447a6012012-05-22 13:14:18 -03001439 .freq = 4000,
Namhyung Kimd1cb9fc2012-05-16 18:45:49 +09001440 .target = {
1441 .uses_mmap = true,
Adrian Hunter3aa59392013-11-15 15:52:29 +02001442 .default_per_cpu = true,
Namhyung Kimd1cb9fc2012-05-16 18:45:49 +09001443 },
Kan Liang9d9cad72015-06-17 09:51:11 -04001444 .proc_map_timeout = 500,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001445 },
Namhyung Kime3d59112015-01-29 17:06:44 +09001446 .tool = {
1447 .sample = process_sample_event,
1448 .fork = perf_event__process_fork,
Adrian Huntercca84822015-08-19 17:29:21 +03001449 .exit = perf_event__process_exit,
Namhyung Kime3d59112015-01-29 17:06:44 +09001450 .comm = perf_event__process_comm,
1451 .mmap = perf_event__process_mmap,
1452 .mmap2 = perf_event__process_mmap2,
Adrian Huntercca84822015-08-19 17:29:21 +03001453 .ordered_events = true,
Namhyung Kime3d59112015-01-29 17:06:44 +09001454 },
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001455};
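
/*
 * Rough sketch of what the defaults above amount to: a plain 'perf record'
 * run behaves approximately like
 *
 *   perf record -F 4000 -T <command>
 *
 * i.e. ~4kHz sampling with timestamps, per-CPU mmaps by default and a 500ms
 * /proc/<pid>/maps processing timeout.
 */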
Frederic Weisbecker7865e812010-04-14 19:42:07 +02001456
Namhyung Kim76a26542015-10-22 23:28:32 +09001457const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
1458 "\n\t\t\t\tDefault: fp";
Arnaldo Carvalho de Melo61eaa3b2012-10-01 15:20:58 -03001459
Wang Nan0aab2132016-06-16 08:02:41 +00001460static bool dry_run;
1461
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001462/*
1463 * XXX Will stay a global variable until we fix builtin-script.c to stop messing
1464 * with it and switch to use the library functions in perf_evlist that came
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03001465 * from builtin-record.c, i.e. use record_opts,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001466 * perf_evlist__prepare_workload, etc. instead of fork+exec'ing 'perf record',
1467 * using pipes, etc.
1468 */
Jiri Olsaefd21302017-01-03 09:19:55 +01001469static struct option __record_options[] = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001470 OPT_CALLBACK('e', "event", &record.evlist, "event",
Thomas Gleixner86847b62009-06-06 12:24:17 +02001471 "event selector. use 'perf list' to list available events",
Jiri Olsaf120f9d2011-07-14 11:25:32 +02001472 parse_events_option),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001473 OPT_CALLBACK(0, "filter", &record.evlist, "filter",
Li Zefanc171b552009-10-15 11:22:07 +08001474 "event filter", parse_filter),
Wang Nan4ba1faa2015-07-10 07:36:10 +00001475 OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
1476 NULL, "don't record events from perf itself",
1477 exclude_perf),
Namhyung Kimbea03402012-04-26 14:15:15 +09001478 OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03001479 "record events on existing process id"),
Namhyung Kimbea03402012-04-26 14:15:15 +09001480 OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03001481 "record events on existing thread id"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001482 OPT_INTEGER('r', "realtime", &record.realtime_prio,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001483 "collect data with this RT SCHED_FIFO priority"),
Arnaldo Carvalho de Melo509051e2014-01-14 17:52:14 -03001484 OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
Kirill Smelkovacac03f2011-01-12 17:59:36 +03001485 "collect data without buffering"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001486 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
Frederic Weisbeckerdaac07b2009-08-13 10:27:19 +02001487 "collect raw sample records from all opened counters"),
Namhyung Kimbea03402012-04-26 14:15:15 +09001488 OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001489 "system-wide collection from all CPUs"),
Namhyung Kimbea03402012-04-26 14:15:15 +09001490 OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
Stephane Eranianc45c6ea2010-05-28 12:00:01 +02001491 "list of cpus to monitor"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001492 OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
Jiri Olsaf5fc14122013-10-15 16:27:32 +02001493 OPT_STRING('o', "output", &record.file.path, "file",
Ingo Molnarabaff322009-06-02 22:59:57 +02001494 "output file name"),
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02001495 OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
1496 &record.opts.no_inherit_set,
1497 "child tasks do not inherit counters"),
Wang Nan4ea648a2016-07-14 08:34:47 +00001498 OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
1499 "synthesize non-sample events at the end of output"),
Wang Nan626a6b72016-07-14 08:34:45 +00001500 OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001501 OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
Adrian Huntere9db1312015-04-09 18:53:46 +03001502 OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
1503 "number of mmap data pages and AUX area tracing mmap pages",
1504 record__parse_mmap_pages),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001505 OPT_BOOLEAN(0, "group", &record.opts.group,
Lin Ming43bece72011-08-17 18:42:07 +08001506 "put the counters into a counter group"),
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001507 OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001508 NULL, "enables call-graph recording" ,
1509 &record_callchain_opt),
1510 OPT_CALLBACK(0, "call-graph", &record.opts,
Namhyung Kim76a26542015-10-22 23:28:32 +09001511 "record_mode[,record_size]", record_callchain_help,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001512 &record_parse_callchain_opt),
Ian Munsiec0555642010-04-13 18:37:33 +10001513 OPT_INCR('v', "verbose", &verbose,
Ingo Molnar3da297a2009-06-07 17:39:02 +02001514 "be more verbose (show counter open errors, etc)"),
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02001515 OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001516 OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001517 "per thread counts"),
Peter Zijlstra56100322015-06-10 16:48:50 +02001518 OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
Jiri Olsab6f35ed2016-08-01 20:02:35 +02001519 OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
Adrian Hunter3abebc52015-07-06 14:51:01 +03001520 OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
1521 &record.opts.sample_time_set,
1522 "Record the sample timestamps"),
Peter Zijlstra56100322015-06-10 16:48:50 +02001523 OPT_BOOLEAN('P', "period", &record.opts.period, "Record the sample period"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001524 OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001525 "don't sample"),
Wang Nand2db9a92016-01-25 09:56:19 +00001526 OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
1527 &record.no_buildid_cache_set,
1528 "do not update the buildid cache"),
1529 OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
1530 &record.no_buildid_set,
1531 "do not collect buildids in perf.data"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001532 OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
Stephane Eranian023695d2011-02-14 11:20:01 +02001533 "monitor event in cgroup name only",
1534 parse_cgroups),
Arnaldo Carvalho de Meloa6205a32014-01-14 17:58:12 -03001535 OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
Andi Kleen6619a532014-01-11 13:38:27 -08001536 "ms to wait before starting measurement after program start"),
Namhyung Kimbea03402012-04-26 14:15:15 +09001537 OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
1538 "user to profile"),
Stephane Eraniana5aabda2012-03-08 23:47:45 +01001539
1540 OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
1541 "branch any", "sample any taken branches",
1542 parse_branch_stack),
1543
1544 OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
1545 "branch filter mask", "branch stack filter modes",
Roberto Agostino Vitillobdfebd82012-02-09 23:21:02 +01001546 parse_branch_stack),
Andi Kleen05484292013-01-24 16:10:29 +01001547 OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
1548 "sample by weight (on special events only)"),
Andi Kleen475eeab2013-09-20 07:40:43 -07001549 OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
1550 "sample transaction flags (special events only)"),
Adrian Hunter3aa59392013-11-15 15:52:29 +02001551 OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
1552 "use per-thread mmaps"),
Stephane Eranianbcc84ec2015-08-31 18:41:12 +02001553 OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
1554 "sample selected machine registers on interrupt,"
1555 " use -I ? to list register names", parse_regs),
Andi Kleen85c273d2015-02-24 15:13:40 -08001556 OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
1557 "Record running/enabled time of read (:S) events"),
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001558 OPT_CALLBACK('k', "clockid", &record.opts,
1559 "clockid", "clockid to use for events, see clock_gettime()",
1560 parse_clockid),
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001561 OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
1562 "opts", "AUX area tracing Snapshot Mode", ""),
Kan Liang9d9cad72015-06-17 09:51:11 -04001563 OPT_UINTEGER(0, "proc-map-timeout", &record.opts.proc_map_timeout,
1564 "per thread proc mmap processing timeout in ms"),
Adrian Hunterb757bb02015-07-21 12:44:04 +03001565 OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
1566 "Record context switch events"),
Jiri Olsa85723882016-02-15 09:34:31 +01001567 OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
1568 "Configure all used events to run in kernel space.",
1569 PARSE_OPT_EXCLUSIVE),
1570 OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
1571 "Configure all used events to run in user space.",
1572 PARSE_OPT_EXCLUSIVE),
Wang Nan71dc23262015-10-14 12:41:19 +00001573 OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
1574 "clang binary to use for compiling BPF scriptlets"),
1575 OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
1576 "options passed to clang when compiling BPF scriptlets"),
He Kuang7efe0e02015-12-14 10:39:23 +00001577 OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
1578 "file", "vmlinux pathname"),
Namhyung Kim61566812016-01-11 22:37:09 +09001579 OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
1580 "Record build-id of all DSOs regardless of hits"),
Wang Nanecfd7a92016-04-13 08:21:07 +00001581 OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
1582 "append timestamp to output filename"),
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001583 OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
Jiri Olsadc0c6122017-01-09 10:51:58 +01001584 &record.switch_output.set, "signal,size",
1585 "Switch output when receive SIGUSR2 or cross size threshold",
1586 "signal"),
Wang Nan0aab2132016-06-16 08:02:41 +00001587 OPT_BOOLEAN(0, "dry-run", &dry_run,
1588 "Parse options then exit"),
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001589 OPT_END()
1590};
1591
Namhyung Kime5b2c202014-10-23 00:15:46 +09001592struct option *record_options = __record_options;
1593
Irina Tirdea1d037ca2012-09-11 01:15:03 +03001594int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001595{
Adrian Hunteref149c22015-04-09 18:53:45 +03001596 int err;
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001597 struct record *rec = &record;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001598 char errbuf[BUFSIZ];
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001599
Wang Nan48e1cab2015-12-14 10:39:22 +00001600#ifndef HAVE_LIBBPF_SUPPORT
1601# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
1602 set_nobuild('\0', "clang-path", true);
1603 set_nobuild('\0', "clang-opt", true);
1604# undef set_nobuild
1605#endif
1606
He Kuang7efe0e02015-12-14 10:39:23 +00001607#ifndef HAVE_BPF_PROLOGUE
1608# if !defined (HAVE_DWARF_SUPPORT)
1609# define REASON "NO_DWARF=1"
1610# elif !defined (HAVE_LIBBPF_SUPPORT)
1611# define REASON "NO_LIBBPF=1"
1612# else
1613# define REASON "this architecture doesn't support BPF prologue"
1614# endif
1615# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
1616 set_nobuild('\0', "vmlinux", true);
1617# undef set_nobuild
1618# undef REASON
1619#endif
1620
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001621 rec->evlist = perf_evlist__new();
1622 if (rec->evlist == NULL)
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -02001623 return -ENOMEM;
1624
Jiri Olsaeb853e82014-02-03 12:44:42 +01001625 perf_config(perf_record_config, rec);
1626
Tom Zanussibca647a2010-11-10 08:11:30 -06001627 argc = parse_options(argc, argv, record_options, record_usage,
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02001628 PARSE_OPT_STOP_AT_NON_OPTION);
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001629 if (!argc && target__none(&rec->opts.target))
Tom Zanussibca647a2010-11-10 08:11:30 -06001630 usage_with_options(record_usage, record_options);
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001631
Namhyung Kimbea03402012-04-26 14:15:15 +09001632 if (nr_cgroups && !rec->opts.target.system_wide) {
Namhyung Kimc7118362015-10-25 00:49:27 +09001633 usage_with_options_msg(record_usage, record_options,
1634 "cgroup monitoring only available in system-wide mode");
1635
Stephane Eranian023695d2011-02-14 11:20:01 +02001636 }
Adrian Hunterb757bb02015-07-21 12:44:04 +03001637 if (rec->opts.record_switch_events &&
1638 !perf_can_record_switch_events()) {
Namhyung Kimc7118362015-10-25 00:49:27 +09001639 ui__error("kernel does not support recording context switch events\n");
1640 parse_options_usage(record_usage, record_options, "switch-events", 0);
1641 return -EINVAL;
Adrian Hunterb757bb02015-07-21 12:44:04 +03001642 }
Stephane Eranian023695d2011-02-14 11:20:01 +02001643
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001644 if (switch_output_setup(rec)) {
1645 parse_options_usage(record_usage, record_options, "switch-output", 0);
1646 return -EINVAL;
1647 }
1648
Adrian Hunteref149c22015-04-09 18:53:45 +03001649 if (!rec->itr) {
1650 rec->itr = auxtrace_record__init(rec->evlist, &err);
1651 if (err)
Adrian Hunter5c01ad602016-09-23 17:38:37 +03001652 goto out;
Adrian Hunteref149c22015-04-09 18:53:45 +03001653 }
1654
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001655 err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
1656 rec->opts.auxtrace_snapshot_opts);
1657 if (err)
Adrian Hunter5c01ad602016-09-23 17:38:37 +03001658 goto out;
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001659
Adrian Hunter1b36c032016-09-23 17:38:39 +03001660 /*
1661 * Allow aliases to facilitate the lookup of symbols for address
1662 * filters. Refer to auxtrace_parse_filters().
1663 */
1664 symbol_conf.allow_aliases = true;
1665
1666 symbol__init(NULL);
1667
1668 err = auxtrace_parse_filters(rec->evlist);
1669 if (err)
1670 goto out;
1671
Wang Nan0aab2132016-06-16 08:02:41 +00001672 if (dry_run)
Adrian Hunter5c01ad602016-09-23 17:38:37 +03001673 goto out;
Wang Nan0aab2132016-06-16 08:02:41 +00001674
Wang Nand7888572016-04-08 15:07:24 +00001675 err = bpf__setup_stdout(rec->evlist);
1676 if (err) {
1677 bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
1678 pr_err("ERROR: Setup BPF stdout failed: %s\n",
1679 errbuf);
Adrian Hunter5c01ad602016-09-23 17:38:37 +03001680 goto out;
Wang Nand7888572016-04-08 15:07:24 +00001681 }
1682
Adrian Hunteref149c22015-04-09 18:53:45 +03001683 err = -ENOMEM;
1684
Arnaldo Carvalho de Meloec80fde2011-05-26 09:53:51 -03001685 if (symbol_conf.kptr_restrict)
Arnaldo Carvalho de Melo646aaea2011-05-27 11:00:41 -03001686 pr_warning(
1687"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
1688"check /proc/sys/kernel/kptr_restrict.\n\n"
1689"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
1690"file is not found in the buildid cache or in the vmlinux path.\n\n"
1691"Samples in kernel modules won't be resolved at all.\n\n"
1692"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
1693"even with a suitable vmlinux or kallsyms file.\n\n");
Arnaldo Carvalho de Meloec80fde2011-05-26 09:53:51 -03001694
Wang Nan0c1d46a2016-04-20 18:59:52 +00001695 if (rec->no_buildid_cache || rec->no_buildid) {
Stephane Eraniana1ac1d32010-06-17 11:39:01 +02001696 disable_buildid_cache();
Jiri Olsadc0c6122017-01-09 10:51:58 +01001697 } else if (rec->switch_output.enabled) {
Wang Nan0c1d46a2016-04-20 18:59:52 +00001698 /*
1699 * In 'perf record --switch-output', disable buildid
1700 * generation by default to reduce data file switching
1701 * overhead. Still generate buildids if they are required
1702 * explicitly using
1703 *
Jiri Olsa60437ac2017-01-03 09:19:56 +01001704 * perf record --switch-output --no-no-buildid \
Wang Nan0c1d46a2016-04-20 18:59:52 +00001705 * --no-no-buildid-cache
1706 *
1707 * Following code equals to:
1708 *
1709 * if ((rec->no_buildid || !rec->no_buildid_set) &&
1710 * (rec->no_buildid_cache || !rec->no_buildid_cache_set))
1711 * disable_buildid_cache();
1712 */
1713 bool disable = true;
1714
1715 if (rec->no_buildid_set && !rec->no_buildid)
1716 disable = false;
1717 if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
1718 disable = false;
1719 if (disable) {
1720 rec->no_buildid = true;
1721 rec->no_buildid_cache = true;
1722 disable_buildid_cache();
1723 }
1724 }
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02001725
Wang Nan4ea648a2016-07-14 08:34:47 +00001726 if (record.opts.overwrite)
1727 record.opts.tail_synthesize = true;
1728
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001729 if (rec->evlist->nr_entries == 0 &&
1730 perf_evlist__add_default(rec->evlist) < 0) {
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02001731 pr_err("Not enough memory for event selector list\n");
Adrian Hunter394c01e2016-09-23 17:38:36 +03001732 goto out;
Peter Zijlstrabbd36e52009-06-11 23:11:50 +02001733 }
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001734
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02001735 if (rec->opts.target.tid && !rec->opts.no_inherit_set)
1736 rec->opts.no_inherit = true;
1737
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001738 err = target__validate(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001739 if (err) {
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001740 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001741 ui__warning("%s", errbuf);
1742 }
Namhyung Kim4bd0f2d2012-04-26 14:15:18 +09001743
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001744 err = target__parse_uid(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001745 if (err) {
1746 int saved_errno = errno;
1747
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001748 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Namhyung Kim3780f482012-05-29 13:22:57 +09001749 ui__error("%s", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001750
1751 err = -saved_errno;
Adrian Hunter394c01e2016-09-23 17:38:36 +03001752 goto out;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001753 }
Arnaldo Carvalho de Melo0d37aa32012-01-19 14:08:15 -02001754
Jiri Olsa23dc4f12016-12-12 11:35:43 +01001755 /* Enable ignoring missing threads when the -u option is specified. */
1756 rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX;
1757
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001758 err = -ENOMEM;
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001759 if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
Arnaldo Carvalho de Melodd7927f2011-01-12 14:28:51 -02001760 usage_with_options(record_usage, record_options);
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02001761
Adrian Hunteref149c22015-04-09 18:53:45 +03001762 err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
1763 if (err)
Adrian Hunter394c01e2016-09-23 17:38:36 +03001764 goto out;
Adrian Hunteref149c22015-04-09 18:53:45 +03001765
Namhyung Kim61566812016-01-11 22:37:09 +09001766 /*
1767 * We take all buildids when the file contains
1768 * AUX area tracing data, because finding which DSOs were
1769 * hit would require decoding the trace, which would take too long.
1770 */
1771 if (rec->opts.full_auxtrace)
1772 rec->buildid_all = true;
1773
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03001774 if (record_opts__config(&rec->opts)) {
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001775 err = -EINVAL;
Adrian Hunter394c01e2016-09-23 17:38:36 +03001776 goto out;
Mike Galbraith7e4ff9e2009-10-12 07:56:03 +02001777 }
1778
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001779 err = __cmd_record(&record, argc, argv);
Adrian Hunter394c01e2016-09-23 17:38:36 +03001780out:
Namhyung Kim45604712014-05-12 09:47:24 +09001781 perf_evlist__delete(rec->evlist);
Arnaldo Carvalho de Melod65a4582010-07-30 18:31:28 -03001782 symbol__exit();
Adrian Hunteref149c22015-04-09 18:53:45 +03001783 auxtrace_record__free(rec->itr);
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001784 return err;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001785}
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001786
1787static void snapshot_sig_handler(int sig __maybe_unused)
1788{
Jiri Olsadc0c6122017-01-09 10:51:58 +01001789 struct record *rec = &record;
1790
Wang Nan5f9cf592016-04-20 18:59:49 +00001791 if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
1792 trigger_hit(&auxtrace_snapshot_trigger);
1793 auxtrace_record__snapshot_started = 1;
1794 if (auxtrace_record__snapshot_start(record.itr))
1795 trigger_error(&auxtrace_snapshot_trigger);
1796 }
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001797
Jiri Olsadc0c6122017-01-09 10:51:58 +01001798 if (switch_output_signal(rec))
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001799 trigger_hit(&switch_output_trigger);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001800}
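
/*
 * Illustrative note: snapshot_sig_handler() is assumed to be installed for
 * SIGUSR2 by __cmd_record(), so a single
 *
 *   kill -USR2 <perf-record-pid>
 *
 * can trigger an AUX area snapshot (with -S) and/or an output file switch
 * (with --switch-output=signal), depending on which triggers are armed.
 */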