/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "perf.h"

#include "util/build-id.h"
#include "util/util.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/config.h"

#include "util/callchain.h"
#include "util/cgroup.h"
#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/drv_configs.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"
#include "util/perf_regs.h"
#include "util/auxtrace.h"
#include "util/tsc.h"
#include "util/parse-branch-options.h"
#include "util/parse-regs-options.h"
#include "util/llvm-utils.h"
#include "util/bpf-loader.h"
#include "util/trigger.h"
#include "util/perf-hooks.h"
#include "asm/bug.h"

#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>
#include <asm/bug.h>
#include <linux/time64.h>

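/* State for --switch-output: how and when to rotate the perf.data file. */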
struct switch_output {
	bool		 enabled;
	bool		 signal;
	unsigned long	 size;
	unsigned long	 time;
	const char	*str;
	bool		 set;
};

struct record {
	struct perf_tool	tool;
	struct record_opts	opts;
	u64			bytes_written;
	struct perf_data_file	file;
	struct auxtrace_record	*itr;
	struct perf_evlist	*evlist;
	struct perf_session	*session;
	const char		*progname;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_set;
	bool			no_buildid_cache;
	bool			no_buildid_cache_set;
	bool			buildid_all;
	bool			timestamp_filename;
	struct switch_output	switch_output;
	unsigned long long	samples;
};

static volatile int auxtrace_record__snapshot_started;
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
static DEFINE_TRIGGER(switch_output_trigger);

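/*
 * Switch-output helpers: decide whether the current perf.data should be
 * rotated because a signal asked for it, because enough bytes have been
 * written, or because a timer fired.  All of them also require the
 * switch-output trigger to be in the ready state.
 */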
static bool switch_output_signal(struct record *rec)
{
	return rec->switch_output.signal &&
	       trigger_is_ready(&switch_output_trigger);
}

static bool switch_output_size(struct record *rec)
{
	return rec->switch_output.size &&
	       trigger_is_ready(&switch_output_trigger) &&
	       (rec->bytes_written >= rec->switch_output.size);
}

static bool switch_output_time(struct record *rec)
{
	return rec->switch_output.time &&
	       trigger_is_ready(&switch_output_trigger);
}

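/*
 * Write a block of bytes to the output file and account for it; once the
 * accumulated size crosses the switch-output threshold, hit the trigger so
 * the main loop rotates the file.
 */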
static int record__write(struct record *rec, void *bf, size_t size)
{
	if (perf_data_file__write(rec->session->file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	rec->bytes_written += size;

	if (switch_output_size(rec))
		trigger_hit(&switch_output_trigger);

	return 0;
}

static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, event, event->header.size);
}

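/*
 * Walk a backward-writing ring buffer starting at 'head' to find the
 * [start, end) range of valid events: stop when the walk wraps past the
 * buffer size or when a zero-sized header marks the end of the data.
 */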
static int
backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
{
	struct perf_event_header *pheader;
	u64 evt_head = head;
	int size = mask + 1;

	pr_debug2("backward_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);
	pheader = (struct perf_event_header *)(buf + (head & mask));
	*start = head;
	while (true) {
		if (evt_head - head >= (unsigned int)size) {
			pr_debug("Finished reading backward ring buffer: rewind\n");
			if (evt_head - head > (unsigned int)size)
				evt_head -= pheader->size;
			*end = evt_head;
			return 0;
		}

		pheader = (struct perf_event_header *)(buf + (evt_head & mask));

		if (pheader->size == 0) {
			pr_debug("Finished reading backward ring buffer: get start\n");
			*end = evt_head;
			return 0;
		}

		evt_head += pheader->size;
		pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
	}
	WARN_ONCE(1, "Shouldn't get here\n");
	return -1;
}

static int
rb_find_range(void *data, int mask, u64 head, u64 old,
	      u64 *start, u64 *end, bool backward)
{
	if (!backward) {
		*start = old;
		*end = head;
		return 0;
	}

	return backward_rb_find_range(data, mask, head, start, end);
}

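/*
 * Drain one mmap'ed ring buffer into the output file, handling the case
 * where the data wraps around the end of the buffer (two writes) and
 * dropping the whole chunk if we fell too far behind the kernel.
 */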
static int
record__mmap_read(struct record *rec, struct perf_mmap *md,
		  bool overwrite, bool backward)
{
	u64 head = perf_mmap__read_head(md);
	u64 old = md->prev;
	u64 end = head, start = old;
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	if (rb_find_range(data, md->mask, head,
			  old, &start, &end, backward))
		return -1;

	if (start == end)
		return 0;

	rec->samples++;

	size = end - start;
	if (size > (unsigned long)(md->mask) + 1) {
		WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");

		md->prev = head;
		perf_mmap__consume(md, overwrite || backward);
		return 0;
	}

	if ((start & md->mask) + size != (end & md->mask)) {
		buf = &data[start & md->mask];
		size = md->mask + 1 - (start & md->mask);
		start += size;

		if (record__write(rec, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[start & md->mask];
	size = end - start;
	start += size;

	if (record__write(rec, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = head;
	perf_mmap__consume(md, overwrite || backward);
out:
	return rc;
}

static volatile int done;
static volatile int signr = -1;
static volatile int child_finished;

static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
}

static void sigsegv_handler(int sig)
{
	perf_hooks__recover();
	sighandler_dump_stack(sig);
}

static void record__sig_exit(void)
{
	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	raise(signr);
}

#ifdef HAVE_AUXTRACE_SUPPORT

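/*
 * Callback used while draining the AUX area: index the auxtrace event when
 * writing to a real file (not a pipe), then write the event header, both
 * data fragments and the padding needed to keep 8-byte alignment.
 */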
static int record__process_auxtrace(struct perf_tool *tool,
				    union perf_event *event, void *data1,
				    size_t len1, void *data2, size_t len2)
{
	struct record *rec = container_of(tool, struct record, tool);
	struct perf_data_file *file = &rec->file;
	size_t padding;
	u8 pad[8] = {0};

	if (!perf_data_file__is_pipe(file)) {
		off_t file_offset;
		int fd = perf_data_file__fd(file);
		int err;

		file_offset = lseek(fd, 0, SEEK_CUR);
		if (file_offset == -1)
			return -1;
		err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
						     event, file_offset);
		if (err)
			return err;
	}

	/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
	padding = (len1 + len2) & 7;
	if (padding)
		padding = 8 - padding;

	record__write(rec, event, event->header.size);
	record__write(rec, data1, len1);
	if (len2)
		record__write(rec, data2, len2);
	record__write(rec, &pad, padding);

	return 0;
}

static int record__auxtrace_mmap_read(struct record *rec,
				      struct auxtrace_mmap *mm)
{
	int ret;

	ret = auxtrace_mmap__read(mm, rec->itr, &rec->tool,
				  record__process_auxtrace);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_mmap_read_snapshot(struct record *rec,
					       struct auxtrace_mmap *mm)
{
	int ret;

	ret = auxtrace_mmap__read_snapshot(mm, rec->itr, &rec->tool,
					   record__process_auxtrace,
					   rec->opts.auxtrace_snapshot_size);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_read_snapshot_all(struct record *rec)
{
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		struct auxtrace_mmap *mm =
				&rec->evlist->mmap[i].auxtrace_mmap;

		if (!mm->base)
			continue;

		if (record__auxtrace_mmap_read_snapshot(rec, mm) != 0) {
			rc = -1;
			goto out;
		}
	}
out:
	return rc;
}

static void record__read_auxtrace_snapshot(struct record *rec)
{
	pr_debug("Recording AUX area tracing snapshot\n");
	if (record__auxtrace_read_snapshot_all(rec) < 0) {
		trigger_error(&auxtrace_snapshot_trigger);
	} else {
		if (auxtrace_record__snapshot_finish(rec->itr))
			trigger_error(&auxtrace_snapshot_trigger);
		else
			trigger_ready(&auxtrace_snapshot_trigger);
	}
}

#else

static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
			       struct auxtrace_mmap *mm __maybe_unused)
{
	return 0;
}

static inline
void record__read_auxtrace_snapshot(struct record *rec __maybe_unused)
{
}

static inline
int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
{
	return 0;
}

#endif

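/*
 * mmap the event ring buffers (and AUX area buffers, if requested) for the
 * whole evlist, turning an EPERM into a hint about
 * /proc/sys/kernel/perf_event_mlock_kb and -m/--mmap_pages.
 */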
static int record__mmap_evlist(struct record *rec,
			       struct perf_evlist *evlist)
{
	struct record_opts *opts = &rec->opts;
	char msg[512];

	if (perf_evlist__mmap_ex(evlist, opts->mmap_pages, false,
				 opts->auxtrace_mmap_pages,
				 opts->auxtrace_snapshot_mode) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u,%u)\n",
			       opts->mmap_pages, opts->auxtrace_mmap_pages);
			return -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno,
				str_error_r(errno, msg, sizeof(msg)));
			if (errno)
				return -errno;
			else
				return -EINVAL;
		}
	}
	return 0;
}

static int record__mmap(struct record *rec)
{
	return record__mmap_evlist(rec, rec->evlist);
}

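/*
 * Open all events in the evlist, falling back to a weaker configuration when
 * the exact one isn't supported, then apply event filters and driver configs
 * and mmap the ring buffers.
 */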
static int record__open(struct record *rec)
{
	char msg[BUFSIZ];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	struct perf_evsel_config_term *err_term;
	int rc = 0;

	perf_evlist__config(evlist, opts, &callchain_param);

	evlist__for_each_entry(evlist, pos) {
try_again:
		if (perf_evsel__open(pos, pos->cpus, pos->threads) < 0) {
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose > 0)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}
	}

	if (perf_evlist__apply_filters(evlist, &pos)) {
		error("failed to set filter \"%s\" on event %s with %d (%s)\n",
			pos->filter, perf_evsel__name(pos), errno,
			str_error_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	if (perf_evlist__apply_drv_configs(evlist, &pos, &err_term)) {
		error("failed to set config \"%s\" on event %s with %d (%s)\n",
		      err_term->val.drv_cfg, perf_evsel__name(pos), errno,
		      str_error_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	rc = record__mmap(rec);
	if (rc)
		goto out;

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

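/*
 * Sample callback used when post-processing the freshly written file for
 * build-ids: count the sample and mark the DSO it hit.
 */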
static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	struct record *rec = container_of(tool, struct record, tool);

	rec->samples++;

	return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
}

static int process_buildids(struct record *rec)
{
	struct perf_data_file *file = &rec->file;
	struct perf_session *session = rec->session;

	if (file->size == 0)
		return 0;

	/*
	 * During this process, it'll load kernel map and replace the
	 * dso->long_name to a real pathname it found.  In this case
	 * we prefer the vmlinux path like
	 *   /lib/modules/3.16.4/build/vmlinux
	 *
	 * rather than build-id path (in debug directory).
	 *   $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
	 */
	symbol_conf.ignore_vmlinux_buildid = true;

	/*
	 * If --buildid-all is given, it marks all DSO regardless of hits,
	 * so no need to process samples.
	 */
	if (rec->buildid_all)
		rec->tool.sample = NULL;

	return perf_session__process_events(session);
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * As for the guest kernel, when processing the record & report
	 * subcommands we arrange the module mmaps prior to the guest kernel
	 * mmap and trigger a preload of the dso, because default guest module
	 * symbols are loaded from guest kallsyms instead of
	 * /lib/modules/XXX/XXX.  This method avoids missing symbols when the
	 * first addr is in a module instead of in the guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for the guest kernel because the guest kernel's
	 * /proc/kallsyms sometimes has no _text.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

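/*
 * Drain every per-cpu ring buffer of an evlist, either the normal forward
 * maps or the backward (overwritable) ones, plus any AUX area data, and emit
 * a PERF_RECORD_FINISHED_ROUND marker if at least one event was written.
 */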
static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
				    bool backward)
{
	u64 bytes_written = rec->bytes_written;
	int i;
	int rc = 0;
	struct perf_mmap *maps;

	if (!evlist)
		return 0;

	maps = backward ? evlist->backward_mmap : evlist->mmap;
	if (!maps)
		return 0;

	if (backward && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
		return 0;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		struct auxtrace_mmap *mm = &maps[i].auxtrace_mmap;

		if (maps[i].base) {
			if (record__mmap_read(rec, &maps[i],
					      evlist->overwrite, backward) != 0) {
				rc = -1;
				goto out;
			}
		}

		if (mm->base && !rec->opts.auxtrace_snapshot_mode &&
		    record__auxtrace_mmap_read(rec, mm) != 0) {
			rc = -1;
			goto out;
		}
	}

	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));

	if (backward)
		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
out:
	return rc;
}

static int record__mmap_read_all(struct record *rec)
{
	int err;

	err = record__mmap_read_evlist(rec, rec->evlist, false);
	if (err)
		return err;

	return record__mmap_read_evlist(rec, rec->evlist, true);
}

static void record__init_features(struct record *rec)
{
	struct perf_session *session = rec->session;
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&rec->evlist->entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);

	if (!rec->opts.full_auxtrace)
		perf_header__clear_feat(&session->header, HEADER_AUXTRACE);

	perf_header__clear_feat(&session->header, HEADER_STAT);
}

static void
record__finish_output(struct record *rec)
{
	struct perf_data_file *file = &rec->file;
	int fd = perf_data_file__fd(file);

	if (file->is_pipe)
		return;

	rec->session->header.data_size += rec->bytes_written;
	file->size = lseek(perf_data_file__fd(file), 0, SEEK_CUR);

	if (!rec->no_buildid) {
		process_buildids(rec);

		if (rec->buildid_all)
			dsos__hit_all(rec->session);
	}
	perf_session__write_header(rec->session, rec->evlist, fd, true);

	return;
}

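/*
 * Synthesize task events for the forked workload from a temporary thread_map
 * built around its PID, but only in the phase (start or tail) selected by
 * opts.tail_synthesize.
 */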
static int record__synthesize_workload(struct record *rec, bool tail)
{
	int err;
	struct thread_map *thread_map;

	if (rec->opts.tail_synthesize != tail)
		return 0;

	thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
	if (thread_map == NULL)
		return -1;

	err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
						 process_synthesized_event,
						 &rec->session->machines.host,
						 rec->opts.sample_address,
						 rec->opts.proc_map_timeout);
	thread_map__put(thread_map);
	return err;
}

static int record__synthesize(struct record *rec, bool tail);

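/*
 * Rotate the output: finish the current perf.data, move it to a timestamped
 * name and, unless we are exiting, reset the byte counters and re-synthesize
 * the tracking events the new file needs to stand on its own.
 */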
static int
record__switch_output(struct record *rec, bool at_exit)
{
	struct perf_data_file *file = &rec->file;
	int fd, err;

	/* Same size as an actual timestamp, e.g. "2015122520103046" */
	char timestamp[] = "InvalidTimestamp";

	record__synthesize(rec, true);
	if (target__none(&rec->opts.target))
		record__synthesize_workload(rec, true);

	rec->samples = 0;
	record__finish_output(rec);
	err = fetch_current_timestamp(timestamp, sizeof(timestamp));
	if (err) {
		pr_err("Failed to get current timestamp\n");
		return -EINVAL;
	}

	fd = perf_data_file__switch(file, timestamp,
				    rec->session->header.data_offset,
				    at_exit);
	if (fd >= 0 && !at_exit) {
		rec->bytes_written = 0;
		rec->session->header.data_size = 0;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
			file->path, timestamp);

	/* Output tracking events */
	if (!at_exit) {
		record__synthesize(rec, false);

		/*
		 * In 'perf record --switch-output' without -a,
		 * record__synthesize() in record__switch_output() won't
		 * generate tracking events because there's no thread_map
		 * in evlist, which causes the newly created perf.data to
		 * lack map and comm information.
		 * Create a fake thread_map and directly call
		 * perf_event__synthesize_thread_map() for those events.
		 */
		if (target__none(&rec->opts.target))
			record__synthesize_workload(rec, false);
	}
	return fd;
}

static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}

static void snapshot_sig_handler(int sig);
static void alarm_sig_handler(int sig);

int __weak
perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
			    struct perf_tool *tool __maybe_unused,
			    perf_event__handler_t process __maybe_unused,
			    struct machine *machine __maybe_unused)
{
	return 0;
}

static const struct perf_event_mmap_page *
perf_evlist__pick_pc(struct perf_evlist *evlist)
{
	if (evlist) {
		if (evlist->mmap && evlist->mmap[0].base)
			return evlist->mmap[0].base;
		if (evlist->backward_mmap && evlist->backward_mmap[0].base)
			return evlist->backward_mmap[0].base;
	}
	return NULL;
}

static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
{
	const struct perf_event_mmap_page *pc;

	pc = perf_evlist__pick_pc(rec->evlist);
	if (pc)
		return pc;
	return NULL;
}

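/*
 * Emit the synthetic events that describe the pre-existing state of the
 * system: attrs and tracing data for pipe output, time conversion, auxtrace
 * info, kernel and module mmaps, guest machines and the target's threads.
 */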
static int record__synthesize(struct record *rec, bool tail)
{
	struct perf_session *session = rec->session;
	struct machine *machine = &session->machines.host;
	struct perf_data_file *file = &rec->file;
	struct record_opts *opts = &rec->opts;
	struct perf_tool *tool = &rec->tool;
	int fd = perf_data_file__fd(file);
	int err = 0;

	if (rec->opts.tail_synthesize != tail)
		return 0;

	if (file->is_pipe) {
		err = perf_event__synthesize_attrs(tool, session,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out;
		}

		if (have_tracepoints(&rec->evlist->entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so it's not really
			 * an error, just that we don't need to
			 * synthesize anything.  We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out;
			}
			rec->bytes_written += err;
		}
	}

	err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
					  process_synthesized_event, machine);
	if (err)
		goto out;

	if (rec->opts.full_auxtrace) {
		err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
					session, process_synthesized_event);
		if (err)
			goto out;
	}

	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
			   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
			   "Check /proc/kallsyms permission or run as root.\n");

	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
			   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
			   "Check /proc/modules permission or run as root.\n");

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
					    process_synthesized_event, opts->sample_address,
					    opts->proc_map_timeout);
out:
	return err;
}

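/*
 * The record session proper: install the signal handlers, create the session
 * and the workload, synthesize the initial events, then loop draining the
 * ring buffers until the workload exits or the user interrupts, and finally
 * write the header and process build-ids.
 */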
static int __cmd_record(struct record *rec, int argc, const char **argv)
{
	int err;
	int status = 0;
	unsigned long waking = 0;
	const bool forks = argc > 0;
	struct machine *machine;
	struct perf_tool *tool = &rec->tool;
	struct record_opts *opts = &rec->opts;
	struct perf_data_file *file = &rec->file;
	struct perf_session *session;
	bool disabled = false, draining = false;
	int fd;

	rec->progname = argv[0];

	atexit(record__sig_exit);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGTERM, sig_handler);
	signal(SIGSEGV, sigsegv_handler);

	if (rec->opts.record_namespaces)
		tool->namespace_events = true;

	if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
		signal(SIGUSR2, snapshot_sig_handler);
		if (rec->opts.auxtrace_snapshot_mode)
			trigger_on(&auxtrace_snapshot_trigger);
		if (rec->switch_output.enabled)
			trigger_on(&switch_output_trigger);
	} else {
		signal(SIGUSR2, SIG_IGN);
	}

	session = perf_session__new(file, false, tool);
	if (session == NULL) {
		pr_err("Perf session creation failed.\n");
		return -1;
	}

	fd = perf_data_file__fd(file);
	rec->session = session;

	record__init_features(rec);

	if (forks) {
		err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
						    argv, file->is_pipe,
						    workload_exec_failed_signal);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			status = err;
			goto out_delete_session;
		}
	}

	if (record__open(rec) != 0) {
		err = -1;
		goto out_child;
	}

	err = bpf__apply_obj_config();
	if (err) {
		char errbuf[BUFSIZ];

		bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
		pr_err("ERROR: Apply config to BPF failed: %s\n",
			 errbuf);
		goto out_child;
	}

	/*
	 * Normally perf_session__new would do this, but it doesn't have the
	 * evlist.
	 */
	if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
		pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
		rec->tool.ordered_events = false;
	}

	if (!rec->evlist->nr_groups)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

	if (file->is_pipe) {
		err = perf_header__write_pipe(fd);
		if (err < 0)
			goto out_child;
	} else {
		err = perf_session__write_header(session, rec->evlist, fd, false);
		if (err < 0)
			goto out_child;
	}

	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		err = -1;
		goto out_child;
	}

	machine = &session->machines.host;

	err = record__synthesize(rec, false);
	if (err < 0)
		goto out_child;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_child;
		}
	}

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!target__none(&opts->target) && !opts->initial_delay)
		perf_evlist__enable(rec->evlist);

	/*
	 * Let the child rip
	 */
	if (forks) {
		union perf_event *event;
		pid_t tgid;

		event = malloc(sizeof(event->comm) + machine->id_hdr_size);
		if (event == NULL) {
			err = -ENOMEM;
			goto out_child;
		}

		/*
		 * Some H/W events are generated before COMM event
		 * which is emitted during exec(), so perf script
		 * cannot see a correct process name for those events.
		 * Synthesize COMM event to prevent it.
		 */
		tgid = perf_event__synthesize_comm(tool, event,
						   rec->evlist->workload.pid,
						   process_synthesized_event,
						   machine);
		free(event);

		if (tgid == -1)
			goto out_child;

		event = malloc(sizeof(event->namespaces) +
			       (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
			       machine->id_hdr_size);
		if (event == NULL) {
			err = -ENOMEM;
			goto out_child;
		}

		/*
		 * Synthesize NAMESPACES event for the command specified.
		 */
		perf_event__synthesize_namespaces(tool, event,
						  rec->evlist->workload.pid,
						  tgid, process_synthesized_event,
						  machine);
		free(event);

		perf_evlist__start_workload(rec->evlist);
	}

	if (opts->initial_delay) {
		usleep(opts->initial_delay * USEC_PER_MSEC);
		perf_evlist__enable(rec->evlist);
	}

	trigger_ready(&auxtrace_snapshot_trigger);
	trigger_ready(&switch_output_trigger);
	perf_hooks__invoke_record_start();
	for (;;) {
		unsigned long long hits = rec->samples;

		/*
		 * rec->evlist->bkw_mmap_state can be BKW_MMAP_EMPTY here:
		 * when done == true and hits != rec->samples in the
		 * previous round.
		 *
		 * perf_evlist__toggle_bkw_mmap ensures we never convert
		 * BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
		 */
		if (trigger_is_hit(&switch_output_trigger) || done || draining)
			perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);

		if (record__mmap_read_all(rec) < 0) {
			trigger_error(&auxtrace_snapshot_trigger);
			trigger_error(&switch_output_trigger);
			err = -1;
			goto out_child;
		}

		if (auxtrace_record__snapshot_started) {
			auxtrace_record__snapshot_started = 0;
			if (!trigger_is_error(&auxtrace_snapshot_trigger))
				record__read_auxtrace_snapshot(rec);
			if (trigger_is_error(&auxtrace_snapshot_trigger)) {
				pr_err("AUX area tracing snapshot failed\n");
				err = -1;
				goto out_child;
			}
		}

		if (trigger_is_hit(&switch_output_trigger)) {
			/*
			 * If switch_output_trigger is hit, the data in the
			 * overwritable ring buffer should have been collected,
			 * so bkw_mmap_state should be set to BKW_MMAP_EMPTY.
			 *
			 * If SIGUSR2 is raised after or during
			 * record__mmap_read_all(), it didn't collect data from
			 * the overwritable ring buffer. Read again.
			 */
			if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
				continue;
			trigger_ready(&switch_output_trigger);

			/*
			 * Reenable events in overwrite ring buffer after
			 * record__mmap_read_all(): we should have collected
			 * data from it.
			 */
			perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);

			if (!quiet)
				fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
					waking);
			waking = 0;
			fd = record__switch_output(rec, false);
			if (fd < 0) {
				pr_err("Failed to switch to new file\n");
				trigger_error(&switch_output_trigger);
				err = fd;
				goto out_child;
			}

			/* re-arm the alarm */
			if (rec->switch_output.time)
				alarm(rec->switch_output.time);
		}

		if (hits == rec->samples) {
			if (done || draining)
				break;
			err = perf_evlist__poll(rec->evlist, -1);
			/*
			 * Propagate error, only if there's any. Ignore positive
			 * number of returned events and interrupt error.
			 */
			if (err > 0 || (err < 0 && errno == EINTR))
				err = 0;
			waking++;

			if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
				draining = true;
		}

		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
		if (done && !disabled && !target__none(&opts->target)) {
			trigger_off(&auxtrace_snapshot_trigger);
			perf_evlist__disable(rec->evlist);
			disabled = true;
		}
	}
	trigger_off(&auxtrace_snapshot_trigger);
	trigger_off(&switch_output_trigger);

	if (forks && workload_exec_errno) {
		char msg[STRERR_BUFSIZE];
		const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
		pr_err("Workload failed: %s\n", emsg);
		err = -1;
		goto out_child;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

	if (target__none(&rec->opts.target))
		record__synthesize_workload(rec, true);

out_child:
	if (forks) {
		int exit_status;

		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&exit_status);

		if (err < 0)
			status = err;
		else if (WIFEXITED(exit_status))
			status = WEXITSTATUS(exit_status);
		else if (WIFSIGNALED(exit_status))
			signr = WTERMSIG(exit_status);
	} else
		status = err;

	record__synthesize(rec, true);
	/* this will be recalculated during process_buildids() */
	rec->samples = 0;

	if (!err) {
		if (!rec->timestamp_filename) {
			record__finish_output(rec);
		} else {
			fd = record__switch_output(rec, true);
			if (fd < 0) {
				status = fd;
				goto out_delete_session;
			}
		}
	}

	perf_hooks__invoke_record_end();

	if (!err && !quiet) {
		char samples[128];
		const char *postfix = rec->timestamp_filename ?
					".<timestamp>" : "";

		if (rec->samples && !rec->opts.full_auxtrace)
			scnprintf(samples, sizeof(samples),
				  " (%" PRIu64 " samples)", rec->samples);
		else
			samples[0] = '\0';

		fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s ]\n",
			perf_data_file__size(file) / 1024.0 / 1024.0,
			file->path, postfix, samples);
	}

out_delete_session:
	perf_session__delete(session);
	return status;
}

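/*
 * Helpers behind --call-graph: report the chosen mode and parse the record
 * options, enabling data address sampling when DWARF unwinding is requested.
 */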
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001211static void callchain_debug(struct callchain_param *callchain)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001212{
Kan Liangaad2b212015-01-05 13:23:04 -05001213 static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
Jiri Olsaa601fdf2014-02-03 12:44:43 +01001214
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001215 pr_debug("callchain: type %s\n", str[callchain->record_mode]);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001216
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001217 if (callchain->record_mode == CALLCHAIN_DWARF)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001218 pr_debug("callchain: stack dump size %d\n",
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001219 callchain->dump_size);
1220}
1221
1222int record_opts__parse_callchain(struct record_opts *record,
1223 struct callchain_param *callchain,
1224 const char *arg, bool unset)
1225{
1226 int ret;
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001227 callchain->enabled = !unset;
1228
1229 /* --no-call-graph */
1230 if (unset) {
1231 callchain->record_mode = CALLCHAIN_NONE;
1232 pr_debug("callchain: disabled\n");
1233 return 0;
1234 }
1235
1236 ret = parse_callchain_record_opt(arg, callchain);
1237 if (!ret) {
1238 /* Enable data address sampling for DWARF unwind. */
1239 if (callchain->record_mode == CALLCHAIN_DWARF)
1240 record->sample_address = true;
1241 callchain_debug(callchain);
1242 }
1243
1244 return ret;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001245}
1246
Kan Liangc421e802015-07-29 05:42:12 -04001247int record_parse_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001248 const char *arg,
1249 int unset)
1250{
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001251 return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
Jiri Olsa26d33022012-08-07 15:20:47 +02001252}
1253
Kan Liangc421e802015-07-29 05:42:12 -04001254int record_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001255 const char *arg __maybe_unused,
1256 int unset __maybe_unused)
1257{
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001258 struct callchain_param *callchain = opt->value;
Kan Liangc421e802015-07-29 05:42:12 -04001259
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001260 callchain->enabled = true;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001261
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001262 if (callchain->record_mode == CALLCHAIN_NONE)
1263 callchain->record_mode = CALLCHAIN_FP;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001264
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001265 callchain_debug(callchain);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001266 return 0;
1267}
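/*
 * Illustrative call-graph invocations handled by the three parsers above;
 * a sketch only ("./workload" is a placeholder). The modes map to
 * CALLCHAIN_FP, CALLCHAIN_DWARF and CALLCHAIN_LBR:
 *
 *   perf record -g ./workload                        # frame-pointer chains (default: fp)
 *   perf record --call-graph dwarf,8192 ./workload   # DWARF unwind, 8192 byte stack dumps
 *   perf record --call-graph lbr ./workload          # LBR-assisted call chains
 *   perf record --no-call-graph ./workload           # CALLCHAIN_NONE, no call chains
 */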
1268
Jiri Olsaeb853e82014-02-03 12:44:42 +01001269static int perf_record_config(const char *var, const char *value, void *cb)
1270{
Namhyung Kim7a29c082015-12-15 10:49:56 +09001271 struct record *rec = cb;
1272
1273 if (!strcmp(var, "record.build-id")) {
1274 if (!strcmp(value, "cache"))
1275 rec->no_buildid_cache = false;
1276 else if (!strcmp(value, "no-cache"))
1277 rec->no_buildid_cache = true;
1278 else if (!strcmp(value, "skip"))
1279 rec->no_buildid = true;
1280 else
1281 return -1;
1282 return 0;
1283 }
Jiri Olsaeb853e82014-02-03 12:44:42 +01001284 if (!strcmp(var, "record.call-graph"))
Namhyung Kim5a2e5e82014-09-23 10:01:44 +09001285 var = "call-graph.record-mode"; /* fall-through */
Jiri Olsaeb853e82014-02-03 12:44:42 +01001286
1287 return perf_default_config(var, value, cb);
1288}
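/*
 * Illustrative ~/.perfconfig entries consumed by perf_record_config() above;
 * a sketch only, other keys fall through to perf_default_config():
 *
 *   [record]
 *       build-id = cache        # or no-cache, or skip
 *       call-graph = dwarf      # remapped to "call-graph.record-mode" before the fall-through
 */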
1289
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001290struct clockid_map {
1291 const char *name;
1292 int clockid;
1293};
1294
1295#define CLOCKID_MAP(n, c) \
1296 { .name = n, .clockid = (c), }
1297
1298#define CLOCKID_END { .name = NULL, }
1299
1300
1301/*
 1302 * Add the missing ones; we need to build on many distros...
1303 */
1304#ifndef CLOCK_MONOTONIC_RAW
1305#define CLOCK_MONOTONIC_RAW 4
1306#endif
1307#ifndef CLOCK_BOOTTIME
1308#define CLOCK_BOOTTIME 7
1309#endif
1310#ifndef CLOCK_TAI
1311#define CLOCK_TAI 11
1312#endif
1313
1314static const struct clockid_map clockids[] = {
1315 /* available for all events, NMI safe */
1316 CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
1317 CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),
1318
1319 /* available for some events */
1320 CLOCKID_MAP("realtime", CLOCK_REALTIME),
1321 CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
1322 CLOCKID_MAP("tai", CLOCK_TAI),
1323
1324 /* available for the lazy */
1325 CLOCKID_MAP("mono", CLOCK_MONOTONIC),
1326 CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
1327 CLOCKID_MAP("real", CLOCK_REALTIME),
1328 CLOCKID_MAP("boot", CLOCK_BOOTTIME),
1329
1330 CLOCKID_END,
1331};
1332
1333static int parse_clockid(const struct option *opt, const char *str, int unset)
1334{
1335 struct record_opts *opts = (struct record_opts *)opt->value;
1336 const struct clockid_map *cm;
1337 const char *ostr = str;
1338
1339 if (unset) {
1340 opts->use_clockid = 0;
1341 return 0;
1342 }
1343
1344 /* no arg passed */
1345 if (!str)
1346 return 0;
1347
1348 /* no setting it twice */
1349 if (opts->use_clockid)
1350 return -1;
1351
1352 opts->use_clockid = true;
1353
 1354 	/* if it's a number, we're done */
1355 if (sscanf(str, "%d", &opts->clockid) == 1)
1356 return 0;
1357
1358 /* allow a "CLOCK_" prefix to the name */
1359 if (!strncasecmp(str, "CLOCK_", 6))
1360 str += 6;
1361
1362 for (cm = clockids; cm->name; cm++) {
1363 if (!strcasecmp(str, cm->name)) {
1364 opts->clockid = cm->clockid;
1365 return 0;
1366 }
1367 }
1368
1369 opts->use_clockid = false;
1370 ui__warning("unknown clockid %s, check man page\n", ostr);
1371 return -1;
1372}
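/*
 * Illustrative -k/--clockid invocations accepted by parse_clockid() above;
 * a sketch only ("./workload" is a placeholder). Names may carry an optional
 * CLOCK_ prefix, and a raw clockid number is taken as-is:
 *
 *   perf record -k monotonic_raw ./workload
 *   perf record -k CLOCK_BOOTTIME ./workload
 *   perf record --clockid 4 ./workload      # numeric, 4 == CLOCK_MONOTONIC_RAW here
 */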
1373
Adrian Huntere9db1312015-04-09 18:53:46 +03001374static int record__parse_mmap_pages(const struct option *opt,
1375 const char *str,
1376 int unset __maybe_unused)
1377{
1378 struct record_opts *opts = opt->value;
1379 char *s, *p;
1380 unsigned int mmap_pages;
1381 int ret;
1382
1383 if (!str)
1384 return -EINVAL;
1385
1386 s = strdup(str);
1387 if (!s)
1388 return -ENOMEM;
1389
1390 p = strchr(s, ',');
1391 if (p)
1392 *p = '\0';
1393
1394 if (*s) {
1395 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
1396 if (ret)
1397 goto out_free;
1398 opts->mmap_pages = mmap_pages;
1399 }
1400
1401 if (!p) {
1402 ret = 0;
1403 goto out_free;
1404 }
1405
1406 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
1407 if (ret)
1408 goto out_free;
1409
1410 opts->auxtrace_mmap_pages = mmap_pages;
1411
1412out_free:
1413 free(s);
1414 return ret;
1415}
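/*
 * Illustrative -m/--mmap-pages invocations parsed above; a sketch only
 * ("./workload" is a placeholder). The value before the comma sizes the data
 * mmap, the optional value after it sizes the AUX area tracing mmap; both go
 * through __perf_evlist__parse_mmap_pages():
 *
 *   perf record -m 512 ./workload       # 512 data pages
 *   perf record -m 512,128 ./workload   # 512 data pages, 128 AUX area pages
 *   perf record -m ,64 ./workload       # keep the data default, 64 AUX area pages
 */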
1416
Jiri Olsa0c582442017-01-09 10:51:59 +01001417static void switch_output_size_warn(struct record *rec)
1418{
1419 u64 wakeup_size = perf_evlist__mmap_size(rec->opts.mmap_pages);
1420 struct switch_output *s = &rec->switch_output;
1421
1422 wakeup_size /= 2;
1423
1424 if (s->size < wakeup_size) {
1425 char buf[100];
1426
1427 unit_number__scnprintf(buf, sizeof(buf), wakeup_size);
 1428 		pr_warning("WARNING: switch-output data size is lower than the "
 1429 			   "wakeup kernel buffer size (%s); "
 1430 			   "expect bigger perf.data sizes\n", buf);
1431 }
1432}
1433
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001434static int switch_output_setup(struct record *rec)
1435{
1436 struct switch_output *s = &rec->switch_output;
Jiri Olsadc0c6122017-01-09 10:51:58 +01001437 static struct parse_tag tags_size[] = {
1438 { .tag = 'B', .mult = 1 },
1439 { .tag = 'K', .mult = 1 << 10 },
1440 { .tag = 'M', .mult = 1 << 20 },
1441 { .tag = 'G', .mult = 1 << 30 },
1442 { .tag = 0 },
1443 };
Jiri Olsabfacbe32017-01-09 10:52:00 +01001444 static struct parse_tag tags_time[] = {
1445 { .tag = 's', .mult = 1 },
1446 { .tag = 'm', .mult = 60 },
1447 { .tag = 'h', .mult = 60*60 },
1448 { .tag = 'd', .mult = 60*60*24 },
1449 { .tag = 0 },
1450 };
Jiri Olsadc0c6122017-01-09 10:51:58 +01001451 unsigned long val;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001452
1453 if (!s->set)
1454 return 0;
1455
1456 if (!strcmp(s->str, "signal")) {
1457 s->signal = true;
1458 pr_debug("switch-output with SIGUSR2 signal\n");
Jiri Olsadc0c6122017-01-09 10:51:58 +01001459 goto enabled;
1460 }
1461
1462 val = parse_tag_value(s->str, tags_size);
1463 if (val != (unsigned long) -1) {
1464 s->size = val;
1465 pr_debug("switch-output with %s size threshold\n", s->str);
1466 goto enabled;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001467 }
1468
Jiri Olsabfacbe32017-01-09 10:52:00 +01001469 val = parse_tag_value(s->str, tags_time);
1470 if (val != (unsigned long) -1) {
1471 s->time = val;
1472 pr_debug("switch-output with %s time threshold (%lu seconds)\n",
1473 s->str, s->time);
1474 goto enabled;
1475 }
1476
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001477 return -1;
Jiri Olsadc0c6122017-01-09 10:51:58 +01001478
1479enabled:
1480 rec->timestamp_filename = true;
1481 s->enabled = true;
Jiri Olsa0c582442017-01-09 10:51:59 +01001482
1483 if (s->size && !rec->opts.no_buffering)
1484 switch_output_size_warn(rec);
1485
Jiri Olsadc0c6122017-01-09 10:51:58 +01001486 return 0;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001487}
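/*
 * Illustrative --switch-output arguments accepted by switch_output_setup()
 * above; a sketch only ("./workload" is a placeholder). Size suffixes come
 * from tags_size (B/K/M/G), time suffixes from tags_time (s/m/h/d); anything
 * else makes the setup fail with -1:
 *
 *   perf record --switch-output ./workload       # defaults to "signal": rotate on SIGUSR2
 *   perf record --switch-output=2G ./workload    # rotate after ~2 GB of data is written
 *   perf record --switch-output=10m ./workload   # rotate every 10 minutes
 */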
1488
Namhyung Kime5b2c202014-10-23 00:15:46 +09001489static const char * const __record_usage[] = {
Mike Galbraith9e0967532009-05-28 16:25:34 +02001490 "perf record [<options>] [<command>]",
1491 "perf record [<options>] -- <command> [<options>]",
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001492 NULL
1493};
Namhyung Kime5b2c202014-10-23 00:15:46 +09001494const char * const *record_usage = __record_usage;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001495
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001496/*
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001497 * XXX Ideally this would be local to cmd_record() and passed to a record__new,
 1498 * because we need access to it in record__exit, which is called
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001499 * after cmd_record() exits, but since record_options needs to be accessible to
1500 * builtin-script, leave it here.
1501 *
 1502 * At least we don't touch it in all the other functions here directly.
1503 *
1504 * Just say no to tons of global variables, sigh.
1505 */
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001506static struct record record = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001507 .opts = {
Andi Kleen8affc2b2014-07-31 14:45:04 +08001508 .sample_time = true,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001509 .mmap_pages = UINT_MAX,
1510 .user_freq = UINT_MAX,
1511 .user_interval = ULLONG_MAX,
Arnaldo Carvalho de Melo447a6012012-05-22 13:14:18 -03001512 .freq = 4000,
Namhyung Kimd1cb9fc2012-05-16 18:45:49 +09001513 .target = {
1514 .uses_mmap = true,
Adrian Hunter3aa59392013-11-15 15:52:29 +02001515 .default_per_cpu = true,
Namhyung Kimd1cb9fc2012-05-16 18:45:49 +09001516 },
Kan Liang9d9cad72015-06-17 09:51:11 -04001517 .proc_map_timeout = 500,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001518 },
Namhyung Kime3d59112015-01-29 17:06:44 +09001519 .tool = {
1520 .sample = process_sample_event,
1521 .fork = perf_event__process_fork,
Adrian Huntercca84822015-08-19 17:29:21 +03001522 .exit = perf_event__process_exit,
Namhyung Kime3d59112015-01-29 17:06:44 +09001523 .comm = perf_event__process_comm,
Hari Bathinif3b36142017-03-08 02:11:43 +05301524 .namespaces = perf_event__process_namespaces,
Namhyung Kime3d59112015-01-29 17:06:44 +09001525 .mmap = perf_event__process_mmap,
1526 .mmap2 = perf_event__process_mmap2,
Adrian Huntercca84822015-08-19 17:29:21 +03001527 .ordered_events = true,
Namhyung Kime3d59112015-01-29 17:06:44 +09001528 },
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001529};
Frederic Weisbecker7865e812010-04-14 19:42:07 +02001530
Namhyung Kim76a26542015-10-22 23:28:32 +09001531const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
1532 "\n\t\t\t\tDefault: fp";
Arnaldo Carvalho de Melo61eaa3b2012-10-01 15:20:58 -03001533
Wang Nan0aab2132016-06-16 08:02:41 +00001534static bool dry_run;
1535
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001536/*
 1537 * XXX Will stay a global variable until we fix builtin-script.c to stop messing
 1538 * with it and switch to using the library functions in perf_evlist that came
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03001539 * from builtin-record.c, i.e. use record_opts,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001540 * perf_evlist__prepare_workload, etc. instead of fork+exec'ing 'perf record',
1541 * using pipes, etc.
1542 */
Jiri Olsaefd21302017-01-03 09:19:55 +01001543static struct option __record_options[] = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001544 OPT_CALLBACK('e', "event", &record.evlist, "event",
Thomas Gleixner86847b62009-06-06 12:24:17 +02001545 "event selector. use 'perf list' to list available events",
Jiri Olsaf120f9d2011-07-14 11:25:32 +02001546 parse_events_option),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001547 OPT_CALLBACK(0, "filter", &record.evlist, "filter",
Li Zefanc171b552009-10-15 11:22:07 +08001548 "event filter", parse_filter),
Wang Nan4ba1faa2015-07-10 07:36:10 +00001549 OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
1550 NULL, "don't record events from perf itself",
1551 exclude_perf),
Namhyung Kimbea03402012-04-26 14:15:15 +09001552 OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03001553 "record events on existing process id"),
Namhyung Kimbea03402012-04-26 14:15:15 +09001554 OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03001555 "record events on existing thread id"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001556 OPT_INTEGER('r', "realtime", &record.realtime_prio,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001557 "collect data with this RT SCHED_FIFO priority"),
Arnaldo Carvalho de Melo509051e2014-01-14 17:52:14 -03001558 OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
Kirill Smelkovacac03f2011-01-12 17:59:36 +03001559 "collect data without buffering"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001560 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
Frederic Weisbeckerdaac07b2009-08-13 10:27:19 +02001561 "collect raw sample records from all opened counters"),
Namhyung Kimbea03402012-04-26 14:15:15 +09001562 OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001563 "system-wide collection from all CPUs"),
Namhyung Kimbea03402012-04-26 14:15:15 +09001564 OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
Stephane Eranianc45c6ea2010-05-28 12:00:01 +02001565 "list of cpus to monitor"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001566 OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
Jiri Olsaf5fc14122013-10-15 16:27:32 +02001567 OPT_STRING('o', "output", &record.file.path, "file",
Ingo Molnarabaff322009-06-02 22:59:57 +02001568 "output file name"),
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02001569 OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
1570 &record.opts.no_inherit_set,
1571 "child tasks do not inherit counters"),
Wang Nan4ea648a2016-07-14 08:34:47 +00001572 OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
1573 "synthesize non-sample events at the end of output"),
Wang Nan626a6b72016-07-14 08:34:45 +00001574 OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001575 OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
Adrian Huntere9db1312015-04-09 18:53:46 +03001576 OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
1577 "number of mmap data pages and AUX area tracing mmap pages",
1578 record__parse_mmap_pages),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001579 OPT_BOOLEAN(0, "group", &record.opts.group,
Lin Ming43bece72011-08-17 18:42:07 +08001580 "put the counters into a counter group"),
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001581 OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001582 NULL, "enables call-graph recording" ,
1583 &record_callchain_opt),
1584 OPT_CALLBACK(0, "call-graph", &record.opts,
Namhyung Kim76a26542015-10-22 23:28:32 +09001585 "record_mode[,record_size]", record_callchain_help,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001586 &record_parse_callchain_opt),
Ian Munsiec0555642010-04-13 18:37:33 +10001587 OPT_INCR('v', "verbose", &verbose,
Ingo Molnar3da297a2009-06-07 17:39:02 +02001588 "be more verbose (show counter open errors, etc)"),
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02001589 OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001590 OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001591 "per thread counts"),
Peter Zijlstra56100322015-06-10 16:48:50 +02001592 OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
Jiri Olsab6f35ed2016-08-01 20:02:35 +02001593 OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
Adrian Hunter3abebc52015-07-06 14:51:01 +03001594 OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
1595 &record.opts.sample_time_set,
1596 "Record the sample timestamps"),
Peter Zijlstra56100322015-06-10 16:48:50 +02001597 OPT_BOOLEAN('P', "period", &record.opts.period, "Record the sample period"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001598 OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001599 "don't sample"),
Wang Nand2db9a92016-01-25 09:56:19 +00001600 OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
1601 &record.no_buildid_cache_set,
1602 "do not update the buildid cache"),
1603 OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
1604 &record.no_buildid_set,
1605 "do not collect buildids in perf.data"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001606 OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
Stephane Eranian023695d2011-02-14 11:20:01 +02001607 "monitor event in cgroup name only",
1608 parse_cgroups),
Arnaldo Carvalho de Meloa6205a32014-01-14 17:58:12 -03001609 OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
Andi Kleen6619a532014-01-11 13:38:27 -08001610 "ms to wait before starting measurement after program start"),
Namhyung Kimbea03402012-04-26 14:15:15 +09001611 OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
1612 "user to profile"),
Stephane Eraniana5aabda2012-03-08 23:47:45 +01001613
1614 OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
1615 "branch any", "sample any taken branches",
1616 parse_branch_stack),
1617
1618 OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
1619 "branch filter mask", "branch stack filter modes",
Roberto Agostino Vitillobdfebd82012-02-09 23:21:02 +01001620 parse_branch_stack),
Andi Kleen05484292013-01-24 16:10:29 +01001621 OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
1622 "sample by weight (on special events only)"),
Andi Kleen475eeab2013-09-20 07:40:43 -07001623 OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
1624 "sample transaction flags (special events only)"),
Adrian Hunter3aa59392013-11-15 15:52:29 +02001625 OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
1626 "use per-thread mmaps"),
Stephane Eranianbcc84ec2015-08-31 18:41:12 +02001627 OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
1628 "sample selected machine registers on interrupt,"
1629 " use -I ? to list register names", parse_regs),
Andi Kleen85c273d2015-02-24 15:13:40 -08001630 OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
1631 "Record running/enabled time of read (:S) events"),
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001632 OPT_CALLBACK('k', "clockid", &record.opts,
1633 "clockid", "clockid to use for events, see clock_gettime()",
1634 parse_clockid),
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001635 OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
1636 "opts", "AUX area tracing Snapshot Mode", ""),
Kan Liang9d9cad72015-06-17 09:51:11 -04001637 OPT_UINTEGER(0, "proc-map-timeout", &record.opts.proc_map_timeout,
1638 "per thread proc mmap processing timeout in ms"),
Hari Bathinif3b36142017-03-08 02:11:43 +05301639 OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
1640 "Record namespaces events"),
Adrian Hunterb757bb02015-07-21 12:44:04 +03001641 OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
1642 "Record context switch events"),
Jiri Olsa85723882016-02-15 09:34:31 +01001643 OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
1644 "Configure all used events to run in kernel space.",
1645 PARSE_OPT_EXCLUSIVE),
1646 OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
1647 "Configure all used events to run in user space.",
1648 PARSE_OPT_EXCLUSIVE),
Wang Nan71dc23262015-10-14 12:41:19 +00001649 OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
1650 "clang binary to use for compiling BPF scriptlets"),
1651 OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
1652 "options passed to clang when compiling BPF scriptlets"),
He Kuang7efe0e02015-12-14 10:39:23 +00001653 OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
1654 "file", "vmlinux pathname"),
Namhyung Kim61566812016-01-11 22:37:09 +09001655 OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
1656 "Record build-id of all DSOs regardless of hits"),
Wang Nanecfd7a92016-04-13 08:21:07 +00001657 OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
1658 "append timestamp to output filename"),
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001659 OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
Jiri Olsabfacbe32017-01-09 10:52:00 +01001660 &record.switch_output.set, "signal,size,time",
 1661 			  "Switch output when SIGUSR2 is received or the size/time threshold is crossed",
Jiri Olsadc0c6122017-01-09 10:51:58 +01001662 "signal"),
Wang Nan0aab2132016-06-16 08:02:41 +00001663 OPT_BOOLEAN(0, "dry-run", &dry_run,
1664 "Parse options then exit"),
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001665 OPT_END()
1666};
1667
Namhyung Kime5b2c202014-10-23 00:15:46 +09001668struct option *record_options = __record_options;
1669
Arnaldo Carvalho de Melob0ad8ea2017-03-27 11:47:20 -03001670int cmd_record(int argc, const char **argv)
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001671{
Adrian Hunteref149c22015-04-09 18:53:45 +03001672 int err;
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001673 struct record *rec = &record;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001674 char errbuf[BUFSIZ];
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001675
Wang Nan48e1cab2015-12-14 10:39:22 +00001676#ifndef HAVE_LIBBPF_SUPPORT
1677# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
1678 set_nobuild('\0', "clang-path", true);
1679 set_nobuild('\0', "clang-opt", true);
1680# undef set_nobuild
1681#endif
1682
He Kuang7efe0e02015-12-14 10:39:23 +00001683#ifndef HAVE_BPF_PROLOGUE
1684# if !defined (HAVE_DWARF_SUPPORT)
1685# define REASON "NO_DWARF=1"
1686# elif !defined (HAVE_LIBBPF_SUPPORT)
1687# define REASON "NO_LIBBPF=1"
1688# else
1689# define REASON "this architecture doesn't support BPF prologue"
1690# endif
1691# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
1692 set_nobuild('\0', "vmlinux", true);
1693# undef set_nobuild
1694# undef REASON
1695#endif
1696
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001697 rec->evlist = perf_evlist__new();
1698 if (rec->evlist == NULL)
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -02001699 return -ENOMEM;
1700
Arnaldo Carvalho de Meloecc4c562017-01-24 13:44:10 -03001701 err = perf_config(perf_record_config, rec);
1702 if (err)
1703 return err;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001704
Tom Zanussibca647a2010-11-10 08:11:30 -06001705 argc = parse_options(argc, argv, record_options, record_usage,
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02001706 PARSE_OPT_STOP_AT_NON_OPTION);
Namhyung Kim68ba3232017-02-17 17:17:42 +09001707 if (quiet)
1708 perf_quiet_option();
Jiri Olsa483635a2017-02-17 18:00:18 +01001709
1710 /* Make system wide (-a) the default target. */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001711 if (!argc && target__none(&rec->opts.target))
Jiri Olsa483635a2017-02-17 18:00:18 +01001712 rec->opts.target.system_wide = true;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001713
Namhyung Kimbea03402012-04-26 14:15:15 +09001714 if (nr_cgroups && !rec->opts.target.system_wide) {
Namhyung Kimc7118362015-10-25 00:49:27 +09001715 usage_with_options_msg(record_usage, record_options,
1716 "cgroup monitoring only available in system-wide mode");
1717
Stephane Eranian023695d2011-02-14 11:20:01 +02001718 }
Adrian Hunterb757bb02015-07-21 12:44:04 +03001719 if (rec->opts.record_switch_events &&
1720 !perf_can_record_switch_events()) {
Namhyung Kimc7118362015-10-25 00:49:27 +09001721 ui__error("kernel does not support recording context switch events\n");
1722 parse_options_usage(record_usage, record_options, "switch-events", 0);
1723 return -EINVAL;
Adrian Hunterb757bb02015-07-21 12:44:04 +03001724 }
Stephane Eranian023695d2011-02-14 11:20:01 +02001725
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001726 if (switch_output_setup(rec)) {
1727 parse_options_usage(record_usage, record_options, "switch-output", 0);
1728 return -EINVAL;
1729 }
1730
Jiri Olsabfacbe32017-01-09 10:52:00 +01001731 if (rec->switch_output.time) {
1732 signal(SIGALRM, alarm_sig_handler);
1733 alarm(rec->switch_output.time);
1734 }
1735
Adrian Hunteref149c22015-04-09 18:53:45 +03001736 if (!rec->itr) {
1737 rec->itr = auxtrace_record__init(rec->evlist, &err);
1738 if (err)
Adrian Hunter5c01ad602016-09-23 17:38:37 +03001739 goto out;
Adrian Hunteref149c22015-04-09 18:53:45 +03001740 }
1741
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001742 err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
1743 rec->opts.auxtrace_snapshot_opts);
1744 if (err)
Adrian Hunter5c01ad602016-09-23 17:38:37 +03001745 goto out;
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001746
Adrian Hunter1b36c032016-09-23 17:38:39 +03001747 /*
1748 * Allow aliases to facilitate the lookup of symbols for address
1749 * filters. Refer to auxtrace_parse_filters().
1750 */
1751 symbol_conf.allow_aliases = true;
1752
1753 symbol__init(NULL);
1754
1755 err = auxtrace_parse_filters(rec->evlist);
1756 if (err)
1757 goto out;
1758
Wang Nan0aab2132016-06-16 08:02:41 +00001759 if (dry_run)
Adrian Hunter5c01ad602016-09-23 17:38:37 +03001760 goto out;
Wang Nan0aab2132016-06-16 08:02:41 +00001761
Wang Nand7888572016-04-08 15:07:24 +00001762 err = bpf__setup_stdout(rec->evlist);
1763 if (err) {
1764 bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
1765 pr_err("ERROR: Setup BPF stdout failed: %s\n",
1766 errbuf);
Adrian Hunter5c01ad602016-09-23 17:38:37 +03001767 goto out;
Wang Nand7888572016-04-08 15:07:24 +00001768 }
1769
Adrian Hunteref149c22015-04-09 18:53:45 +03001770 err = -ENOMEM;
1771
Arnaldo Carvalho de Meloec80fde2011-05-26 09:53:51 -03001772 if (symbol_conf.kptr_restrict)
Arnaldo Carvalho de Melo646aaea2011-05-27 11:00:41 -03001773 pr_warning(
1774"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
1775"check /proc/sys/kernel/kptr_restrict.\n\n"
1776"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
1777"file is not found in the buildid cache or in the vmlinux path.\n\n"
1778"Samples in kernel modules won't be resolved at all.\n\n"
 1779"If some relocation was applied (e.g. kexec), symbols may be misresolved\n"
1780"even with a suitable vmlinux or kallsyms file.\n\n");
Arnaldo Carvalho de Meloec80fde2011-05-26 09:53:51 -03001781
Wang Nan0c1d46a2016-04-20 18:59:52 +00001782 if (rec->no_buildid_cache || rec->no_buildid) {
Stephane Eraniana1ac1d32010-06-17 11:39:01 +02001783 disable_buildid_cache();
Jiri Olsadc0c6122017-01-09 10:51:58 +01001784 } else if (rec->switch_output.enabled) {
Wang Nan0c1d46a2016-04-20 18:59:52 +00001785 /*
1786 * In 'perf record --switch-output', disable buildid
1787 * generation by default to reduce data file switching
 1788 * overhead. Still generate buildids if they are required
1789 * explicitly using
1790 *
Jiri Olsa60437ac2017-01-03 09:19:56 +01001791 * perf record --switch-output --no-no-buildid \
Wang Nan0c1d46a2016-04-20 18:59:52 +00001792 * --no-no-buildid-cache
1793 *
 1794 * The following code is equivalent to:
1795 *
1796 * if ((rec->no_buildid || !rec->no_buildid_set) &&
1797 * (rec->no_buildid_cache || !rec->no_buildid_cache_set))
1798 * disable_buildid_cache();
1799 */
1800 bool disable = true;
1801
1802 if (rec->no_buildid_set && !rec->no_buildid)
1803 disable = false;
1804 if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
1805 disable = false;
1806 if (disable) {
1807 rec->no_buildid = true;
1808 rec->no_buildid_cache = true;
1809 disable_buildid_cache();
1810 }
1811 }
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02001812
Wang Nan4ea648a2016-07-14 08:34:47 +00001813 if (record.opts.overwrite)
1814 record.opts.tail_synthesize = true;
1815
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001816 if (rec->evlist->nr_entries == 0 &&
1817 perf_evlist__add_default(rec->evlist) < 0) {
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02001818 pr_err("Not enough memory for event selector list\n");
Adrian Hunter394c01e2016-09-23 17:38:36 +03001819 goto out;
Peter Zijlstrabbd36e52009-06-11 23:11:50 +02001820 }
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001821
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02001822 if (rec->opts.target.tid && !rec->opts.no_inherit_set)
1823 rec->opts.no_inherit = true;
1824
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001825 err = target__validate(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001826 if (err) {
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001827 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001828 ui__warning("%s", errbuf);
1829 }
Namhyung Kim4bd0f2d2012-04-26 14:15:18 +09001830
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001831 err = target__parse_uid(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001832 if (err) {
1833 int saved_errno = errno;
1834
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001835 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Namhyung Kim3780f482012-05-29 13:22:57 +09001836 ui__error("%s", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001837
1838 err = -saved_errno;
Adrian Hunter394c01e2016-09-23 17:38:36 +03001839 goto out;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001840 }
Arnaldo Carvalho de Melo0d37aa32012-01-19 14:08:15 -02001841
Jiri Olsa23dc4f12016-12-12 11:35:43 +01001842	/* Enable ignoring missing threads when the -u option is given. */
1843 rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX;
1844
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001845 err = -ENOMEM;
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001846 if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
Arnaldo Carvalho de Melodd7927f2011-01-12 14:28:51 -02001847 usage_with_options(record_usage, record_options);
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02001848
Adrian Hunteref149c22015-04-09 18:53:45 +03001849 err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
1850 if (err)
Adrian Hunter394c01e2016-09-23 17:38:36 +03001851 goto out;
Adrian Hunteref149c22015-04-09 18:53:45 +03001852
Namhyung Kim61566812016-01-11 22:37:09 +09001853 /*
1854 * We take all buildids when the file contains
 1855 * AUX area tracing data, because we do not decode the
 1856 * trace, as that would take too long.
1857 */
1858 if (rec->opts.full_auxtrace)
1859 rec->buildid_all = true;
1860
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03001861 if (record_opts__config(&rec->opts)) {
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001862 err = -EINVAL;
Adrian Hunter394c01e2016-09-23 17:38:36 +03001863 goto out;
Mike Galbraith7e4ff9e2009-10-12 07:56:03 +02001864 }
1865
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001866 err = __cmd_record(&record, argc, argv);
Adrian Hunter394c01e2016-09-23 17:38:36 +03001867out:
Namhyung Kim45604712014-05-12 09:47:24 +09001868 perf_evlist__delete(rec->evlist);
Arnaldo Carvalho de Melod65a4582010-07-30 18:31:28 -03001869 symbol__exit();
Adrian Hunteref149c22015-04-09 18:53:45 +03001870 auxtrace_record__free(rec->itr);
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001871 return err;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001872}
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001873
1874static void snapshot_sig_handler(int sig __maybe_unused)
1875{
Jiri Olsadc0c6122017-01-09 10:51:58 +01001876 struct record *rec = &record;
1877
Wang Nan5f9cf592016-04-20 18:59:49 +00001878 if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
1879 trigger_hit(&auxtrace_snapshot_trigger);
1880 auxtrace_record__snapshot_started = 1;
1881 if (auxtrace_record__snapshot_start(record.itr))
1882 trigger_error(&auxtrace_snapshot_trigger);
1883 }
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001884
Jiri Olsadc0c6122017-01-09 10:51:58 +01001885 if (switch_output_signal(rec))
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001886 trigger_hit(&switch_output_trigger);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001887}
Jiri Olsabfacbe32017-01-09 10:52:00 +01001888
1889static void alarm_sig_handler(int sig __maybe_unused)
1890{
1891 struct record *rec = &record;
1892
1893 if (switch_output_time(rec))
1894 trigger_hit(&switch_output_trigger);
1895}