// SPDX-License-Identifier: GPL-2.0
/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "perf.h"

#include "util/build-id.h"
#include "util/util.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/config.h"

#include "util/callchain.h"
#include "util/cgroup.h"
#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/drv_configs.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"
#include "util/perf_regs.h"
#include "util/auxtrace.h"
#include "util/tsc.h"
#include "util/parse-branch-options.h"
#include "util/parse-regs-options.h"
#include "util/llvm-utils.h"
#include "util/bpf-loader.h"
#include "util/trigger.h"
#include "util/perf-hooks.h"
#include "util/time-utils.h"
#include "util/units.h"
#include "asm/bug.h"

#include <errno.h>
#include <inttypes.h>
#include <locale.h>
#include <poll.h>
#include <unistd.h>
#include <sched.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <linux/time64.h>

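/*
 * State for 'perf record --switch-output': the current output file can be
 * rotated on a SIGUSR2 signal, after a given amount of data has been
 * written, or after a given time period, as selected on the command line
 * (the raw option string is kept in ->str/->set for parsing).
 */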
struct switch_output {
	bool		 enabled;
	bool		 signal;
	unsigned long	 size;
	unsigned long	 time;
	const char	*str;
	bool		 set;
};

struct record {
	struct perf_tool	tool;
	struct record_opts	opts;
	u64			bytes_written;
	struct perf_data	data;
	struct auxtrace_record	*itr;
	struct perf_evlist	*evlist;
	struct perf_session	*session;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_set;
	bool			no_buildid_cache;
	bool			no_buildid_cache_set;
	bool			buildid_all;
	bool			timestamp_filename;
	bool			timestamp_boundary;
	struct switch_output	switch_output;
	unsigned long long	samples;
};

static volatile int auxtrace_record__snapshot_started;
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
static DEFINE_TRIGGER(switch_output_trigger);

static bool switch_output_signal(struct record *rec)
{
	return rec->switch_output.signal &&
	       trigger_is_ready(&switch_output_trigger);
}

static bool switch_output_size(struct record *rec)
{
	return rec->switch_output.size &&
	       trigger_is_ready(&switch_output_trigger) &&
	       (rec->bytes_written >= rec->switch_output.size);
}

static bool switch_output_time(struct record *rec)
{
	return rec->switch_output.time &&
	       trigger_is_ready(&switch_output_trigger);
}

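/*
 * Every byte that ends up in the perf.data file goes through record__write():
 * it appends to the output, accounts the size in rec->bytes_written and,
 * when the --switch-output size threshold is crossed, hits the trigger so
 * the main loop rotates the output file.
 */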
static int record__write(struct record *rec, struct perf_mmap *map __maybe_unused,
			 void *bf, size_t size)
{
	struct perf_data_file *file = &rec->session->data->file;

	if (perf_data_file__write(file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	rec->bytes_written += size;

	if (switch_output_size(rec))
		trigger_hit(&switch_output_trigger);

	return 0;
}

static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, NULL, event, event->header.size);
}

static int record__pushfn(struct perf_mmap *map, void *to, void *bf, size_t size)
{
	struct record *rec = to;

	rec->samples++;
	return record__write(rec, map, bf, size);
}

static volatile int done;
static volatile int signr = -1;
static volatile int child_finished;

static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
}

static void sigsegv_handler(int sig)
{
	perf_hooks__recover();
	sighandler_dump_stack(sig);
}

static void record__sig_exit(void)
{
	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	raise(signr);
}

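/*
 * AUX area tracing (e.g. Intel PT) is optional at build time.  The real
 * implementations live under HAVE_AUXTRACE_SUPPORT; the stubs in the #else
 * branch keep the rest of the file free of #ifdefs.
 */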
#ifdef HAVE_AUXTRACE_SUPPORT

static int record__process_auxtrace(struct perf_tool *tool,
				    struct perf_mmap *map,
				    union perf_event *event, void *data1,
				    size_t len1, void *data2, size_t len2)
{
	struct record *rec = container_of(tool, struct record, tool);
	struct perf_data *data = &rec->data;
	size_t padding;
	u8 pad[8] = {0};

	if (!perf_data__is_pipe(data)) {
		off_t file_offset;
		int fd = perf_data__fd(data);
		int err;

		file_offset = lseek(fd, 0, SEEK_CUR);
		if (file_offset == -1)
			return -1;
		err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
						     event, file_offset);
		if (err)
			return err;
	}

	/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
	padding = (len1 + len2) & 7;
	if (padding)
		padding = 8 - padding;

	record__write(rec, map, event, event->header.size);
	record__write(rec, map, data1, len1);
	if (len2)
		record__write(rec, map, data2, len2);
	record__write(rec, map, &pad, padding);

	return 0;
}

static int record__auxtrace_mmap_read(struct record *rec,
				      struct perf_mmap *map)
{
	int ret;

	ret = auxtrace_mmap__read(map, rec->itr, &rec->tool,
				  record__process_auxtrace);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_mmap_read_snapshot(struct record *rec,
					       struct perf_mmap *map)
{
	int ret;

	ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool,
					   record__process_auxtrace,
					   rec->opts.auxtrace_snapshot_size);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_read_snapshot_all(struct record *rec)
{
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		struct perf_mmap *map = &rec->evlist->mmap[i];

		if (!map->auxtrace_mmap.base)
			continue;

		if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) {
			rc = -1;
			goto out;
		}
	}
out:
	return rc;
}

static void record__read_auxtrace_snapshot(struct record *rec)
{
	pr_debug("Recording AUX area tracing snapshot\n");
	if (record__auxtrace_read_snapshot_all(rec) < 0) {
		trigger_error(&auxtrace_snapshot_trigger);
	} else {
		if (auxtrace_record__snapshot_finish(rec->itr))
			trigger_error(&auxtrace_snapshot_trigger);
		else
			trigger_ready(&auxtrace_snapshot_trigger);
	}
}

static int record__auxtrace_init(struct record *rec)
{
	int err;

	if (!rec->itr) {
		rec->itr = auxtrace_record__init(rec->evlist, &err);
		if (err)
			return err;
	}

	err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
					      rec->opts.auxtrace_snapshot_opts);
	if (err)
		return err;

	return auxtrace_parse_filters(rec->evlist);
}

#else

static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
			       struct perf_mmap *map __maybe_unused)
{
	return 0;
}

static inline
void record__read_auxtrace_snapshot(struct record *rec __maybe_unused)
{
}

static inline
int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
{
	return 0;
}

static int record__auxtrace_init(struct record *rec __maybe_unused)
{
	return 0;
}

#endif

static int record__mmap_evlist(struct record *rec,
			       struct perf_evlist *evlist)
{
	struct record_opts *opts = &rec->opts;
	char msg[512];

	if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
				 opts->auxtrace_mmap_pages,
				 opts->auxtrace_snapshot_mode) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u,%u)\n",
			       opts->mmap_pages, opts->auxtrace_mmap_pages);
			return -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno,
				str_error_r(errno, msg, sizeof(msg)));
			if (errno)
				return -errno;
			else
				return -EINVAL;
		}
	}
	return 0;
}

static int record__mmap(struct record *rec)
{
	return record__mmap_evlist(rec, rec->evlist);
}

static int record__open(struct record *rec)
{
	char msg[BUFSIZ];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	struct perf_evsel_config_term *err_term;
	int rc = 0;

	/*
	 * For initial_delay we need to add a dummy event so that we can track
	 * PERF_RECORD_MMAP while we wait for the initial delay to enable the
	 * real events, the ones asked by the user.
	 */
	if (opts->initial_delay) {
		if (perf_evlist__add_dummy(evlist))
			return -ENOMEM;

		pos = perf_evlist__first(evlist);
		pos->tracking = 0;
		pos = perf_evlist__last(evlist);
		pos->tracking = 1;
		pos->attr.enable_on_exec = 1;
	}

	perf_evlist__config(evlist, opts, &callchain_param);

	evlist__for_each_entry(evlist, pos) {
try_again:
		if (perf_evsel__open(pos, pos->cpus, pos->threads) < 0) {
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose > 0)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}

		pos->supported = true;
	}

	if (perf_evlist__apply_filters(evlist, &pos)) {
		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
			pos->filter, perf_evsel__name(pos), errno,
			str_error_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	if (perf_evlist__apply_drv_configs(evlist, &pos, &err_term)) {
		pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
		      err_term->val.drv_cfg, perf_evsel__name(pos), errno,
		      str_error_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	rc = record__mmap(rec);
	if (rc)
		goto out;

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

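/*
 * Sample handler used while post-processing the recorded data in
 * process_buildids(): track the first/last sample timestamps and mark the
 * DSOs that were actually hit, unless --buildid-all already made
 * per-sample marking unnecessary.
 */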
static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	struct record *rec = container_of(tool, struct record, tool);

	if (rec->evlist->first_sample_time == 0)
		rec->evlist->first_sample_time = sample->time;

	rec->evlist->last_sample_time = sample->time;

	if (rec->buildid_all)
		return 0;

	rec->samples++;
	return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
}

static int process_buildids(struct record *rec)
{
	struct perf_data *data = &rec->data;
	struct perf_session *session = rec->session;

	if (data->size == 0)
		return 0;

	/*
	 * During this process, it'll load kernel map and replace the
	 * dso->long_name to a real pathname it found.  In this case
	 * we prefer the vmlinux path like
	 *   /lib/modules/3.16.4/build/vmlinux
	 *
	 * rather than build-id path (in debug directory).
	 *   $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
	 */
	symbol_conf.ignore_vmlinux_buildid = true;

	/*
	 * If --buildid-all is given, it marks all DSO regardless of hits,
	 * so no need to process samples. But if timestamp_boundary is enabled,
	 * it still needs to walk on all samples to get the timestamps of
	 * first/last samples.
	 */
	if (rec->buildid_all && !rec->timestamp_boundary)
		rec->tool.sample = NULL;

	return perf_session__process_events(session);
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * As for guest kernel when processing subcommand record & report,
	 * we arrange module mmap prior to guest kernel mmap and trigger
	 * a preload dso because default guest module symbols are loaded
	 * from guest kallsyms instead of /lib/modules/XXX/XXX. This
	 * method is used to avoid symbol missing when the first addr is
	 * in module instead of in guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
	 * have no _text sometimes.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

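/*
 * Drain one set of ring buffers (either the regular ones or the overwritable
 * "backward" ones) into the output file.  If anything was written, append a
 * PERF_RECORD_FINISHED_ROUND marker so the report side can sort and flush
 * events round by round.
 */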
static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
				    bool overwrite)
{
	u64 bytes_written = rec->bytes_written;
	int i;
	int rc = 0;
	struct perf_mmap *maps;

	if (!evlist)
		return 0;

	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
	if (!maps)
		return 0;

	if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
		return 0;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		struct perf_mmap *map = &maps[i];

		if (map->base) {
			if (perf_mmap__push(map, rec, record__pushfn) != 0) {
				rc = -1;
				goto out;
			}
		}

		if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
		    record__auxtrace_mmap_read(rec, map) != 0) {
			rc = -1;
			goto out;
		}
	}

	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, NULL, &finished_round_event, sizeof(finished_round_event));

	if (overwrite)
		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
out:
	return rc;
}

static int record__mmap_read_all(struct record *rec)
{
	int err;

	err = record__mmap_read_evlist(rec, rec->evlist, false);
	if (err)
		return err;

	return record__mmap_read_evlist(rec, rec->evlist, true);
}

static void record__init_features(struct record *rec)
{
	struct perf_session *session = rec->session;
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&rec->evlist->entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);

	if (!rec->opts.full_auxtrace)
		perf_header__clear_feat(&session->header, HEADER_AUXTRACE);

	if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
		perf_header__clear_feat(&session->header, HEADER_CLOCKID);

	perf_header__clear_feat(&session->header, HEADER_STAT);
}

static void
record__finish_output(struct record *rec)
{
	struct perf_data *data = &rec->data;
	int fd = perf_data__fd(data);

	if (data->is_pipe)
		return;

	rec->session->header.data_size += rec->bytes_written;
	data->size = lseek(perf_data__fd(data), 0, SEEK_CUR);

	if (!rec->no_buildid) {
		process_buildids(rec);

		if (rec->buildid_all)
			dsos__hit_all(rec->session);
	}
	perf_session__write_header(rec->session, rec->evlist, fd, true);

	return;
}

static int record__synthesize_workload(struct record *rec, bool tail)
{
	int err;
	struct thread_map *thread_map;

	if (rec->opts.tail_synthesize != tail)
		return 0;

	thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
	if (thread_map == NULL)
		return -1;

	err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
						 process_synthesized_event,
						 &rec->session->machines.host,
						 rec->opts.sample_address,
						 rec->opts.proc_map_timeout);
	thread_map__put(thread_map);
	return err;
}

static int record__synthesize(struct record *rec, bool tail);

static int
record__switch_output(struct record *rec, bool at_exit)
{
	struct perf_data *data = &rec->data;
	int fd, err;

	/* Same size: "2015122520103046" */
	char timestamp[] = "InvalidTimestamp";

	record__synthesize(rec, true);
	if (target__none(&rec->opts.target))
		record__synthesize_workload(rec, true);

	rec->samples = 0;
	record__finish_output(rec);
	err = fetch_current_timestamp(timestamp, sizeof(timestamp));
	if (err) {
		pr_err("Failed to get current timestamp\n");
		return -EINVAL;
	}

	fd = perf_data__switch(data, timestamp,
			       rec->session->header.data_offset,
			       at_exit);
	if (fd >= 0 && !at_exit) {
		rec->bytes_written = 0;
		rec->session->header.data_size = 0;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
			data->file.path, timestamp);

	/* Output tracking events */
	if (!at_exit) {
		record__synthesize(rec, false);

		/*
		 * In 'perf record --switch-output' without -a,
		 * record__synthesize() in record__switch_output() won't
		 * generate tracking events because there's no thread_map
		 * in evlist, which causes the newly created perf.data to
		 * contain no map and comm information.
		 * Create a fake thread_map and directly call
		 * perf_event__synthesize_thread_map() for those events.
		 */
		if (target__none(&rec->opts.target))
			record__synthesize_workload(rec, false);
	}
	return fd;
}

static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}

static void snapshot_sig_handler(int sig);
static void alarm_sig_handler(int sig);

int __weak
perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
			    struct perf_tool *tool __maybe_unused,
			    perf_event__handler_t process __maybe_unused,
			    struct machine *machine __maybe_unused)
{
	return 0;
}

static const struct perf_event_mmap_page *
perf_evlist__pick_pc(struct perf_evlist *evlist)
{
	if (evlist) {
		if (evlist->mmap && evlist->mmap[0].base)
			return evlist->mmap[0].base;
		if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].base)
			return evlist->overwrite_mmap[0].base;
	}
	return NULL;
}

static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
{
	const struct perf_event_mmap_page *pc;

	pc = perf_evlist__pick_pc(rec->evlist);
	if (pc)
		return pc;
	return NULL;
}

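/*
 * Emit the synthesized (non-sample) events that consumers of perf.data need
 * to make sense of the samples: event attributes and features (in pipe
 * mode), time conversion and auxtrace info, kernel and module mmaps, and
 * the thread/cpu maps plus the existing threads of the target.
 */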
static int record__synthesize(struct record *rec, bool tail)
{
	struct perf_session *session = rec->session;
	struct machine *machine = &session->machines.host;
	struct perf_data *data = &rec->data;
	struct record_opts *opts = &rec->opts;
	struct perf_tool *tool = &rec->tool;
	int fd = perf_data__fd(data);
	int err = 0;

	if (rec->opts.tail_synthesize != tail)
		return 0;

	if (data->is_pipe) {
		/*
		 * We need to synthesize events first, because some
		 * features work on top of them (on the report side).
		 */
		err = perf_event__synthesize_attrs(tool, rec->evlist,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out;
		}

		err = perf_event__synthesize_features(tool, session, rec->evlist,
						      process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize features.\n");
			return err;
		}

		if (have_tracepoints(&rec->evlist->entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so it's not really
			 * an error, just that we don't need to
			 * synthesize anything.  We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out;
			}
			rec->bytes_written += err;
		}
	}

	err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
					  process_synthesized_event, machine);
	if (err)
		goto out;

	if (rec->opts.full_auxtrace) {
		err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
					session, process_synthesized_event);
		if (err)
			goto out;
	}

	if (!perf_evlist__exclude_kernel(rec->evlist)) {
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine);
		WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
			   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
			   "Check /proc/kallsyms permission or run as root.\n");

		err = perf_event__synthesize_modules(tool, process_synthesized_event,
						     machine);
		WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
			   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
			   "Check /proc/modules permission or run as root.\n");
	}

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	err = perf_event__synthesize_extra_attr(&rec->tool,
						rec->evlist,
						process_synthesized_event,
						data->is_pipe);
	if (err)
		goto out;

	err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->threads,
						 process_synthesized_event,
						 NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize thread map.\n");
		return err;
	}

	err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->cpus,
					     process_synthesized_event, NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize cpu map.\n");
		return err;
	}

	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
					    process_synthesized_event, opts->sample_address,
					    opts->proc_map_timeout, 1);
out:
	return err;
}

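/*
 * The core of 'perf record': set up signals and the session, open the
 * events, write the header, synthesize metadata, kick off the workload and
 * then loop, pushing mmap'ed data to the output file and honouring the
 * auxtrace snapshot and switch-output triggers, until the workload exits
 * or the user interrupts the record.
 */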
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -0300860static int __cmd_record(struct record *rec, int argc, const char **argv)
Peter Zijlstra16c8a102009-05-05 17:50:27 +0200861{
David Ahern57706ab2013-11-06 11:41:34 -0700862 int err;
Namhyung Kim45604712014-05-12 09:47:24 +0900863 int status = 0;
Peter Zijlstra8b412662009-09-17 19:59:05 +0200864 unsigned long waking = 0;
Zhang, Yanmin46be6042010-03-18 11:36:04 -0300865 const bool forks = argc > 0;
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -0200866 struct perf_tool *tool = &rec->tool;
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -0300867 struct record_opts *opts = &rec->opts;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +0100868 struct perf_data *data = &rec->data;
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -0200869 struct perf_session *session;
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -0300870 bool disabled = false, draining = false;
Namhyung Kim42aa2762015-01-29 17:06:48 +0900871 int fd;
Peter Zijlstrade9ac072009-04-08 15:01:31 +0200872
Namhyung Kim45604712014-05-12 09:47:24 +0900873 atexit(record__sig_exit);
Peter Zijlstraf5970552009-06-18 23:22:55 +0200874 signal(SIGCHLD, sig_handler);
875 signal(SIGINT, sig_handler);
David Ahern804f7ac2013-05-06 12:24:23 -0600876 signal(SIGTERM, sig_handler);
Wang Nana0748652016-11-26 07:03:28 +0000877 signal(SIGSEGV, sigsegv_handler);
Wang Nanc0bdc1c2016-04-13 08:21:06 +0000878
Hari Bathinif3b36142017-03-08 02:11:43 +0530879 if (rec->opts.record_namespaces)
880 tool->namespace_events = true;
881
Jiri Olsadc0c6122017-01-09 10:51:58 +0100882 if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300883 signal(SIGUSR2, snapshot_sig_handler);
Wang Nan3c1cb7e2016-04-20 18:59:50 +0000884 if (rec->opts.auxtrace_snapshot_mode)
885 trigger_on(&auxtrace_snapshot_trigger);
Jiri Olsadc0c6122017-01-09 10:51:58 +0100886 if (rec->switch_output.enabled)
Wang Nan3c1cb7e2016-04-20 18:59:50 +0000887 trigger_on(&switch_output_trigger);
Wang Nanc0bdc1c2016-04-13 08:21:06 +0000888 } else {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300889 signal(SIGUSR2, SIG_IGN);
Wang Nanc0bdc1c2016-04-13 08:21:06 +0000890 }
Peter Zijlstraf5970552009-06-18 23:22:55 +0200891
Jiri Olsa8ceb41d2017-01-23 22:07:59 +0100892 session = perf_session__new(data, false, tool);
Arnaldo Carvalho de Melo94c744b2009-12-11 21:24:02 -0200893 if (session == NULL) {
Adrien BAKffa91882014-04-18 11:00:43 +0900894 pr_err("Perf session creation failed.\n");
Arnaldo Carvalho de Meloa9a70bb2009-11-17 01:18:11 -0200895 return -1;
896 }
897
Jiri Olsa8ceb41d2017-01-23 22:07:59 +0100898 fd = perf_data__fd(data);
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -0200899 rec->session = session;
900
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -0300901 record__init_features(rec);
Stephane Eranian330aa672012-03-08 23:47:46 +0100902
Alexey Budankovcf790512018-10-09 17:36:24 +0300903 if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
904 session->header.env.clockid_res_ns = rec->opts.clockid_res_ns;
905
Arnaldo Carvalho de Melod4db3f12009-12-27 21:36:57 -0200906 if (forks) {
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -0300907 err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
Jiri Olsa8ceb41d2017-01-23 22:07:59 +0100908 argv, data->is_pipe,
Arnaldo Carvalho de Melo735f7e02014-01-03 14:56:49 -0300909 workload_exec_failed_signal);
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -0200910 if (err < 0) {
911 pr_err("Couldn't run the workload!\n");
Namhyung Kim45604712014-05-12 09:47:24 +0900912 status = err;
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -0200913 goto out_delete_session;
Jens Axboe0a5ac842009-08-12 11:18:01 +0200914 }
Peter Zijlstra856e9662009-12-16 17:55:55 +0100915 }
916
Jiri Olsaad46e48c2018-03-02 17:13:54 +0100917 /*
918 * If we have just single event and are sending data
919 * through pipe, we need to force the ids allocation,
920 * because we synthesize event name through the pipe
921 * and need the id for that.
922 */
923 if (data->is_pipe && rec->evlist->nr_entries == 1)
924 rec->opts.sample_id = true;
925
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -0300926 if (record__open(rec) != 0) {
David Ahern8d3eca22012-08-26 12:24:47 -0600927 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +0900928 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -0600929 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +0200930
Wang Nan8690a2a2016-02-22 09:10:32 +0000931 err = bpf__apply_obj_config();
932 if (err) {
933 char errbuf[BUFSIZ];
934
935 bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
936 pr_err("ERROR: Apply config to BPF failed: %s\n",
937 errbuf);
938 goto out_child;
939 }
940
Adrian Huntercca84822015-08-19 17:29:21 +0300941 /*
942 * Normally perf_session__new would do this, but it doesn't have the
943 * evlist.
944 */
945 if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
946 pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
947 rec->tool.ordered_events = false;
948 }
949
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -0300950 if (!rec->evlist->nr_groups)
Namhyung Kima8bb5592013-01-22 18:09:31 +0900951 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
952
Jiri Olsa8ceb41d2017-01-23 22:07:59 +0100953 if (data->is_pipe) {
Namhyung Kim42aa2762015-01-29 17:06:48 +0900954 err = perf_header__write_pipe(fd);
Tom Zanussi529870e2010-04-01 23:59:16 -0500955 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +0900956 goto out_child;
Jiri Olsa563aecb2013-06-05 13:35:06 +0200957 } else {
Namhyung Kim42aa2762015-01-29 17:06:48 +0900958 err = perf_session__write_header(session, rec->evlist, fd, false);
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -0200959 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +0900960 goto out_child;
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -0200961 }
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +0200962
David Ahernd3665492012-02-06 15:27:52 -0700963 if (!rec->no_buildid
Robert Richtere20960c2011-12-07 10:02:55 +0100964 && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
David Ahernd3665492012-02-06 15:27:52 -0700965 pr_err("Couldn't generate buildids. "
Robert Richtere20960c2011-12-07 10:02:55 +0100966 "Use --no-buildid to profile anyway.\n");
David Ahern8d3eca22012-08-26 12:24:47 -0600967 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +0900968 goto out_child;
Robert Richtere20960c2011-12-07 10:02:55 +0100969 }
970
Wang Nan4ea648a2016-07-14 08:34:47 +0000971 err = record__synthesize(rec, false);
Wang Nanc45c86e2016-02-26 09:32:07 +0000972 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +0900973 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -0600974
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -0200975 if (rec->realtime_prio) {
Peter Zijlstrade9ac072009-04-08 15:01:31 +0200976 struct sched_param param;
977
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -0200978 param.sched_priority = rec->realtime_prio;
Peter Zijlstrade9ac072009-04-08 15:01:31 +0200979 if (sched_setscheduler(0, SCHED_FIFO, &param)) {
Arnaldo Carvalho de Melo6beba7a2009-10-21 17:34:06 -0200980 pr_err("Could not set realtime priority.\n");
David Ahern8d3eca22012-08-26 12:24:47 -0600981 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +0900982 goto out_child;
Peter Zijlstrade9ac072009-04-08 15:01:31 +0200983 }
984 }
985
Jiri Olsa774cb492012-11-12 18:34:01 +0100986 /*
987 * When perf is starting the traced process, all the events
988 * (apart from group members) have enable_on_exec=1 set,
989 * so don't spoil it by prematurely enabling them.
990 */
Andi Kleen6619a532014-01-11 13:38:27 -0800991 if (!target__none(&opts->target) && !opts->initial_delay)
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -0300992 perf_evlist__enable(rec->evlist);
David Ahern764e16a32011-08-25 10:17:55 -0600993
Peter Zijlstra856e9662009-12-16 17:55:55 +0100994 /*
995 * Let the child rip
996 */
Namhyung Kime803cf92015-09-22 09:24:55 +0900997 if (forks) {
Jiri Olsa20a8a3c2018-03-07 16:50:04 +0100998 struct machine *machine = &session->machines.host;
Namhyung Kime5bed562015-09-30 10:45:24 +0900999 union perf_event *event;
Hari Bathinie907caf2017-03-08 02:11:51 +05301000 pid_t tgid;
Namhyung Kime5bed562015-09-30 10:45:24 +09001001
1002 event = malloc(sizeof(event->comm) + machine->id_hdr_size);
1003 if (event == NULL) {
1004 err = -ENOMEM;
1005 goto out_child;
1006 }
1007
Namhyung Kime803cf92015-09-22 09:24:55 +09001008 /*
1009 * Some H/W events are generated before COMM event
1010 * which is emitted during exec(), so perf script
1011 * cannot see a correct process name for those events.
1012 * Synthesize COMM event to prevent it.
1013 */
Hari Bathinie907caf2017-03-08 02:11:51 +05301014 tgid = perf_event__synthesize_comm(tool, event,
1015 rec->evlist->workload.pid,
1016 process_synthesized_event,
1017 machine);
1018 free(event);
1019
1020 if (tgid == -1)
1021 goto out_child;
1022
1023 event = malloc(sizeof(event->namespaces) +
1024 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
1025 machine->id_hdr_size);
1026 if (event == NULL) {
1027 err = -ENOMEM;
1028 goto out_child;
1029 }
1030
1031 /*
1032 * Synthesize NAMESPACES event for the command specified.
1033 */
1034 perf_event__synthesize_namespaces(tool, event,
1035 rec->evlist->workload.pid,
1036 tgid, process_synthesized_event,
1037 machine);
Namhyung Kime5bed562015-09-30 10:45:24 +09001038 free(event);
Namhyung Kime803cf92015-09-22 09:24:55 +09001039
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001040 perf_evlist__start_workload(rec->evlist);
Namhyung Kime803cf92015-09-22 09:24:55 +09001041 }
Peter Zijlstra856e9662009-12-16 17:55:55 +01001042
Andi Kleen6619a532014-01-11 13:38:27 -08001043 if (opts->initial_delay) {
Arnaldo Carvalho de Melo0693e682016-08-08 15:05:46 -03001044 usleep(opts->initial_delay * USEC_PER_MSEC);
Andi Kleen6619a532014-01-11 13:38:27 -08001045 perf_evlist__enable(rec->evlist);
1046 }
1047
Wang Nan5f9cf592016-04-20 18:59:49 +00001048 trigger_ready(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001049 trigger_ready(&switch_output_trigger);
Wang Nana0748652016-11-26 07:03:28 +00001050 perf_hooks__invoke_record_start();
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001051 for (;;) {
Yang Shi9f065192015-09-29 14:49:43 -07001052 unsigned long long hits = rec->samples;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001053
Wang Nan057374642016-07-14 08:34:43 +00001054 /*
1055 * rec->evlist->bkw_mmap_state is possible to be
1056 * BKW_MMAP_EMPTY here: when done == true and
1057 * hits != rec->samples in previous round.
1058 *
1059 * perf_evlist__toggle_bkw_mmap ensure we never
1060 * convert BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
1061 */
1062 if (trigger_is_hit(&switch_output_trigger) || done || draining)
1063 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);
1064
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001065 if (record__mmap_read_all(rec) < 0) {
Wang Nan5f9cf592016-04-20 18:59:49 +00001066 trigger_error(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001067 trigger_error(&switch_output_trigger);
David Ahern8d3eca22012-08-26 12:24:47 -06001068 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001069 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001070 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001071
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001072 if (auxtrace_record__snapshot_started) {
1073 auxtrace_record__snapshot_started = 0;
Wang Nan5f9cf592016-04-20 18:59:49 +00001074 if (!trigger_is_error(&auxtrace_snapshot_trigger))
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001075 record__read_auxtrace_snapshot(rec);
Wang Nan5f9cf592016-04-20 18:59:49 +00001076 if (trigger_is_error(&auxtrace_snapshot_trigger)) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001077 pr_err("AUX area tracing snapshot failed\n");
1078 err = -1;
1079 goto out_child;
1080 }
1081 }
1082
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001083 if (trigger_is_hit(&switch_output_trigger)) {
Wang Nan057374642016-07-14 08:34:43 +00001084 /*
1085 * If switch_output_trigger is hit, the data in
1086 * overwritable ring buffer should have been collected,
1087 * so bkw_mmap_state should be set to BKW_MMAP_EMPTY.
1088 *
1089 * If SIGUSR2 raise after or during record__mmap_read_all(),
1090 * record__mmap_read_all() didn't collect data from
1091 * overwritable ring buffer. Read again.
1092 */
1093 if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
1094 continue;
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001095 trigger_ready(&switch_output_trigger);
1096
Wang Nan057374642016-07-14 08:34:43 +00001097 /*
1098 * Reenable events in overwrite ring buffer after
1099 * record__mmap_read_all(): we should have collected
1100 * data from it.
1101 */
1102 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);
1103
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001104 if (!quiet)
1105 fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
1106 waking);
1107 waking = 0;
1108 fd = record__switch_output(rec, false);
1109 if (fd < 0) {
1110 pr_err("Failed to switch to new file\n");
1111 trigger_error(&switch_output_trigger);
1112 err = fd;
1113 goto out_child;
1114 }
Jiri Olsabfacbe32017-01-09 10:52:00 +01001115
1116 /* re-arm the alarm */
1117 if (rec->switch_output.time)
1118 alarm(rec->switch_output.time);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001119 }
1120
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001121 if (hits == rec->samples) {
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001122 if (done || draining)
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001123 break;
Arnaldo Carvalho de Melof66a8892014-08-18 17:25:59 -03001124 err = perf_evlist__poll(rec->evlist, -1);
Jiri Olsaa5151142014-06-02 13:44:23 -04001125 /*
1126 * Propagate error, only if there's any. Ignore positive
1127 * number of returned events and interrupt error.
1128 */
1129 if (err > 0 || (err < 0 && errno == EINTR))
Namhyung Kim45604712014-05-12 09:47:24 +09001130 err = 0;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001131 waking++;
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001132
1133 if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
1134 draining = true;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001135 }
1136
Jiri Olsa774cb492012-11-12 18:34:01 +01001137 /*
1138 * When perf is starting the traced process, at the end events
1139 * die with the process and we wait for that. Thus no need to
1140 * disable events in this case.
1141 */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001142 if (done && !disabled && !target__none(&opts->target)) {
Wang Nan5f9cf592016-04-20 18:59:49 +00001143 trigger_off(&auxtrace_snapshot_trigger);
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001144 perf_evlist__disable(rec->evlist);
Jiri Olsa27119262012-11-12 18:34:02 +01001145 disabled = true;
1146 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001147 }
Wang Nan5f9cf592016-04-20 18:59:49 +00001148 trigger_off(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001149 trigger_off(&switch_output_trigger);
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001150
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001151 if (forks && workload_exec_errno) {
Masami Hiramatsu35550da2014-08-14 02:22:43 +00001152 char msg[STRERR_BUFSIZE];
Arnaldo Carvalho de Meloc8b5f2c2016-07-06 11:56:20 -03001153 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001154 pr_err("Workload failed: %s\n", emsg);
1155 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001156 goto out_child;
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001157 }
1158
Namhyung Kime3d59112015-01-29 17:06:44 +09001159 if (!quiet)
Namhyung Kim45604712014-05-12 09:47:24 +09001160 fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02001161
Wang Nan4ea648a2016-07-14 08:34:47 +00001162 if (target__none(&rec->opts.target))
1163 record__synthesize_workload(rec, true);
1164
Namhyung Kim45604712014-05-12 09:47:24 +09001165out_child:
1166 if (forks) {
1167 int exit_status;
Ingo Molnaraddc2782009-06-02 23:43:11 +02001168
Namhyung Kim45604712014-05-12 09:47:24 +09001169 if (!child_finished)
1170 kill(rec->evlist->workload.pid, SIGTERM);
1171
1172 wait(&exit_status);
1173
1174 if (err < 0)
1175 status = err;
1176 else if (WIFEXITED(exit_status))
1177 status = WEXITSTATUS(exit_status);
1178 else if (WIFSIGNALED(exit_status))
1179 signr = WTERMSIG(exit_status);
1180 } else
1181 status = err;
1182
Wang Nan4ea648a2016-07-14 08:34:47 +00001183 record__synthesize(rec, true);
Namhyung Kime3d59112015-01-29 17:06:44 +09001184 /* this will be recalculated during process_buildids() */
1185 rec->samples = 0;
1186
Wang Nanecfd7a92016-04-13 08:21:07 +00001187 if (!err) {
1188 if (!rec->timestamp_filename) {
1189 record__finish_output(rec);
1190 } else {
1191 fd = record__switch_output(rec, true);
1192 if (fd < 0) {
1193 status = fd;
1194 goto out_delete_session;
1195 }
1196 }
1197 }
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001198
Wang Nana0748652016-11-26 07:03:28 +00001199 perf_hooks__invoke_record_end();
1200
Namhyung Kime3d59112015-01-29 17:06:44 +09001201 if (!err && !quiet) {
1202 char samples[128];
Wang Nanecfd7a92016-04-13 08:21:07 +00001203 const char *postfix = rec->timestamp_filename ?
1204 ".<timestamp>" : "";
Namhyung Kime3d59112015-01-29 17:06:44 +09001205
Adrian Hunteref149c22015-04-09 18:53:45 +03001206 if (rec->samples && !rec->opts.full_auxtrace)
Namhyung Kime3d59112015-01-29 17:06:44 +09001207 scnprintf(samples, sizeof(samples),
1208 " (%" PRIu64 " samples)", rec->samples);
1209 else
1210 samples[0] = '\0';
1211
Wang Nanecfd7a92016-04-13 08:21:07 +00001212 fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s ]\n",
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001213 perf_data__size(data) / 1024.0 / 1024.0,
Jiri Olsaeae8ad82017-01-23 22:25:41 +01001214 data->file.path, postfix, samples);
Namhyung Kime3d59112015-01-29 17:06:44 +09001215 }
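	/*
	 * Illustration (not part of the original file; values made up): the
	 * summary printed above typically reads
	 *
	 *   [ perf record: Captured and wrote 1.234 MB perf.data (8921 samples) ]
	 *
	 * with ".<timestamp>" appended to the file name when
	 * --timestamp-filename or --switch-output is in effect.
	 */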
1216
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001217out_delete_session:
1218 perf_session__delete(session);
Namhyung Kim45604712014-05-12 09:47:24 +09001219 return status;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001220}
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001221
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001222static void callchain_debug(struct callchain_param *callchain)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001223{
Kan Liangaad2b212015-01-05 13:23:04 -05001224 static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
Jiri Olsaa601fdf2014-02-03 12:44:43 +01001225
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001226 pr_debug("callchain: type %s\n", str[callchain->record_mode]);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001227
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001228 if (callchain->record_mode == CALLCHAIN_DWARF)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001229 pr_debug("callchain: stack dump size %d\n",
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001230 callchain->dump_size);
1231}
1232
1233int record_opts__parse_callchain(struct record_opts *record,
1234 struct callchain_param *callchain,
1235 const char *arg, bool unset)
1236{
1237 int ret;
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001238 callchain->enabled = !unset;
1239
1240 /* --no-call-graph */
1241 if (unset) {
1242 callchain->record_mode = CALLCHAIN_NONE;
1243 pr_debug("callchain: disabled\n");
1244 return 0;
1245 }
1246
1247 ret = parse_callchain_record_opt(arg, callchain);
1248 if (!ret) {
1249 /* Enable data address sampling for DWARF unwind. */
1250 if (callchain->record_mode == CALLCHAIN_DWARF)
1251 record->sample_address = true;
1252 callchain_debug(callchain);
1253 }
1254
1255 return ret;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001256}
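/*
 * Usage illustration (not part of the original file): the argument parsed
 * above is "record_mode[,record_size]", so typical invocations are
 *
 *   perf record --call-graph fp ...          # frame-pointer unwinding
 *   perf record --call-graph dwarf,4096 ...  # DWARF unwind, 4kB stack dump
 *   perf record --call-graph lbr ...         # hardware LBR, where supported
 *
 * The DWARF case additionally sets sample_address, as done in
 * record_opts__parse_callchain() above.
 */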
1257
Kan Liangc421e802015-07-29 05:42:12 -04001258int record_parse_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001259 const char *arg,
1260 int unset)
1261{
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001262 return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
Jiri Olsa26d33022012-08-07 15:20:47 +02001263}
1264
Kan Liangc421e802015-07-29 05:42:12 -04001265int record_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001266 const char *arg __maybe_unused,
1267 int unset __maybe_unused)
1268{
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001269 struct callchain_param *callchain = opt->value;
Kan Liangc421e802015-07-29 05:42:12 -04001270
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001271 callchain->enabled = true;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001272
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001273 if (callchain->record_mode == CALLCHAIN_NONE)
1274 callchain->record_mode = CALLCHAIN_FP;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001275
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001276 callchain_debug(callchain);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001277 return 0;
1278}
1279
Jiri Olsaeb853e82014-02-03 12:44:42 +01001280static int perf_record_config(const char *var, const char *value, void *cb)
1281{
Namhyung Kim7a29c082015-12-15 10:49:56 +09001282 struct record *rec = cb;
1283
1284 if (!strcmp(var, "record.build-id")) {
1285 if (!strcmp(value, "cache"))
1286 rec->no_buildid_cache = false;
1287 else if (!strcmp(value, "no-cache"))
1288 rec->no_buildid_cache = true;
1289 else if (!strcmp(value, "skip"))
1290 rec->no_buildid = true;
1291 else
1292 return -1;
1293 return 0;
1294 }
Yisheng Xiecff17202018-03-12 19:25:57 +08001295 if (!strcmp(var, "record.call-graph")) {
1296 var = "call-graph.record-mode";
1297 return perf_default_config(var, value, cb);
1298 }
Jiri Olsaeb853e82014-02-03 12:44:42 +01001299
Yisheng Xiecff17202018-03-12 19:25:57 +08001300 return 0;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001301}
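/*
 * Example (a sketch, not from the original file) of how the keys handled
 * above appear in ~/.perfconfig:
 *
 *   [record]
 *           build-id = cache        # or no-cache / skip
 *           call-graph = dwarf      # forwarded as call-graph.record-mode
 */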
1302
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001303struct clockid_map {
1304 const char *name;
1305 int clockid;
1306};
1307
1308#define CLOCKID_MAP(n, c) \
1309 { .name = n, .clockid = (c), }
1310
1311#define CLOCKID_END { .name = NULL, }
1312
1313
1314/*
1315 * Add the missing ones; we need to build on many distros...
1316 */
1317#ifndef CLOCK_MONOTONIC_RAW
1318#define CLOCK_MONOTONIC_RAW 4
1319#endif
1320#ifndef CLOCK_BOOTTIME
1321#define CLOCK_BOOTTIME 7
1322#endif
1323#ifndef CLOCK_TAI
1324#define CLOCK_TAI 11
1325#endif
1326
1327static const struct clockid_map clockids[] = {
1328 /* available for all events, NMI safe */
1329 CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
1330 CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),
1331
1332 /* available for some events */
1333 CLOCKID_MAP("realtime", CLOCK_REALTIME),
1334 CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
1335 CLOCKID_MAP("tai", CLOCK_TAI),
1336
1337 /* available for the lazy */
1338 CLOCKID_MAP("mono", CLOCK_MONOTONIC),
1339 CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
1340 CLOCKID_MAP("real", CLOCK_REALTIME),
1341 CLOCKID_MAP("boot", CLOCK_BOOTTIME),
1342
1343 CLOCKID_END,
1344};
1345
Alexey Budankovcf790512018-10-09 17:36:24 +03001346static int get_clockid_res(clockid_t clk_id, u64 *res_ns)
1347{
1348 struct timespec res;
1349
1350 *res_ns = 0;
1351 if (!clock_getres(clk_id, &res))
1352 *res_ns = res.tv_nsec + res.tv_sec * NSEC_PER_SEC;
1353 else
1354 pr_warning("WARNING: Failed to determine specified clock resolution.\n");
1355
1356 return 0;
1357}
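/*
 * For reference, a minimal standalone sketch of the same libc call used
 * above (illustrative only, not used by perf itself):
 *
 *	struct timespec res;
 *
 *	if (!clock_getres(CLOCK_MONOTONIC, &res))
 *		printf("resolution: %ld ns\n",
 *		       res.tv_sec * 1000000000L + res.tv_nsec);
 */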
1358
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001359static int parse_clockid(const struct option *opt, const char *str, int unset)
1360{
1361 struct record_opts *opts = (struct record_opts *)opt->value;
1362 const struct clockid_map *cm;
1363 const char *ostr = str;
1364
1365 if (unset) {
1366 opts->use_clockid = 0;
1367 return 0;
1368 }
1369
1370 /* no arg passed */
1371 if (!str)
1372 return 0;
1373
1374 /* don't allow setting it twice */
1375 if (opts->use_clockid)
1376 return -1;
1377
1378 opts->use_clockid = true;
1379
1380 /* if it's a number, we're done */
1381 if (sscanf(str, "%d", &opts->clockid) == 1)
Alexey Budankovcf790512018-10-09 17:36:24 +03001382 return get_clockid_res(opts->clockid, &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001383
1384 /* allow a "CLOCK_" prefix to the name */
1385 if (!strncasecmp(str, "CLOCK_", 6))
1386 str += 6;
1387
1388 for (cm = clockids; cm->name; cm++) {
1389 if (!strcasecmp(str, cm->name)) {
1390 opts->clockid = cm->clockid;
Alexey Budankovcf790512018-10-09 17:36:24 +03001391 return get_clockid_res(opts->clockid,
1392 &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001393 }
1394 }
1395
1396 opts->use_clockid = false;
1397 ui__warning("unknown clockid %s, check man page\n", ostr);
1398 return -1;
1399}
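/*
 * Usage illustration (not part of the original file): -k/--clockid takes
 * either a numeric clockid or one of the names from clockids[] above,
 * optionally with a "CLOCK_" prefix, e.g.
 *
 *   perf record -k mono ...
 *   perf record -k CLOCK_MONOTONIC_RAW ...
 *
 * On success the clock's resolution is captured via get_clockid_res() as
 * well, so the precision of the recorded timestamps is known later.
 */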
1400
Adrian Huntere9db1312015-04-09 18:53:46 +03001401static int record__parse_mmap_pages(const struct option *opt,
1402 const char *str,
1403 int unset __maybe_unused)
1404{
1405 struct record_opts *opts = opt->value;
1406 char *s, *p;
1407 unsigned int mmap_pages;
1408 int ret;
1409
1410 if (!str)
1411 return -EINVAL;
1412
1413 s = strdup(str);
1414 if (!s)
1415 return -ENOMEM;
1416
1417 p = strchr(s, ',');
1418 if (p)
1419 *p = '\0';
1420
1421 if (*s) {
1422 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
1423 if (ret)
1424 goto out_free;
1425 opts->mmap_pages = mmap_pages;
1426 }
1427
1428 if (!p) {
1429 ret = 0;
1430 goto out_free;
1431 }
1432
1433 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
1434 if (ret)
1435 goto out_free;
1436
1437 opts->auxtrace_mmap_pages = mmap_pages;
1438
1439out_free:
1440 free(s);
1441 return ret;
1442}
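/*
 * Usage illustration (not part of the original file): -m/--mmap-pages
 * takes "pages[,pages]", the optional second value sizing the AUX area
 * tracing buffer, e.g.
 *
 *   perf record -m 512 ...       # 512 data pages
 *   perf record -m 512,128 ...   # plus 128 AUX area pages
 *
 * Both values are parsed by __perf_evlist__parse_mmap_pages(), which also
 * accepts size suffixes such as 16M per the perf-record documentation.
 */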
1443
Jiri Olsa0c582442017-01-09 10:51:59 +01001444static void switch_output_size_warn(struct record *rec)
1445{
1446 u64 wakeup_size = perf_evlist__mmap_size(rec->opts.mmap_pages);
1447 struct switch_output *s = &rec->switch_output;
1448
1449 wakeup_size /= 2;
1450
1451 if (s->size < wakeup_size) {
1452 char buf[100];
1453
1454 unit_number__scnprintf(buf, sizeof(buf), wakeup_size);
1455 pr_warning("WARNING: switch-output data size lower than "
1456 "wakeup kernel buffer size (%s) "
1457 "expect bigger perf.data sizes\n", buf);
1458 }
1459}
1460
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001461static int switch_output_setup(struct record *rec)
1462{
1463 struct switch_output *s = &rec->switch_output;
Jiri Olsadc0c6122017-01-09 10:51:58 +01001464 static struct parse_tag tags_size[] = {
1465 { .tag = 'B', .mult = 1 },
1466 { .tag = 'K', .mult = 1 << 10 },
1467 { .tag = 'M', .mult = 1 << 20 },
1468 { .tag = 'G', .mult = 1 << 30 },
1469 { .tag = 0 },
1470 };
Jiri Olsabfacbe32017-01-09 10:52:00 +01001471 static struct parse_tag tags_time[] = {
1472 { .tag = 's', .mult = 1 },
1473 { .tag = 'm', .mult = 60 },
1474 { .tag = 'h', .mult = 60*60 },
1475 { .tag = 'd', .mult = 60*60*24 },
1476 { .tag = 0 },
1477 };
Jiri Olsadc0c6122017-01-09 10:51:58 +01001478 unsigned long val;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001479
1480 if (!s->set)
1481 return 0;
1482
1483 if (!strcmp(s->str, "signal")) {
1484 s->signal = true;
1485 pr_debug("switch-output with SIGUSR2 signal\n");
Jiri Olsadc0c6122017-01-09 10:51:58 +01001486 goto enabled;
1487 }
1488
1489 val = parse_tag_value(s->str, tags_size);
1490 if (val != (unsigned long) -1) {
1491 s->size = val;
1492 pr_debug("switch-output with %s size threshold\n", s->str);
1493 goto enabled;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001494 }
1495
Jiri Olsabfacbe32017-01-09 10:52:00 +01001496 val = parse_tag_value(s->str, tags_time);
1497 if (val != (unsigned long) -1) {
1498 s->time = val;
1499 pr_debug("switch-output with %s time threshold (%lu seconds)\n",
1500 s->str, s->time);
1501 goto enabled;
1502 }
1503
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001504 return -1;
Jiri Olsadc0c6122017-01-09 10:51:58 +01001505
1506enabled:
1507 rec->timestamp_filename = true;
1508 s->enabled = true;
Jiri Olsa0c582442017-01-09 10:51:59 +01001509
1510 if (s->size && !rec->opts.no_buffering)
1511 switch_output_size_warn(rec);
1512
Jiri Olsadc0c6122017-01-09 10:51:58 +01001513 return 0;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001514}
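/*
 * Usage illustration (not part of the original file): the three forms
 * accepted above are
 *
 *   perf record --switch-output ...       # rotate output on SIGUSR2
 *   perf record --switch-output=2G ...    # rotate after ~2GB of data
 *   perf record --switch-output=30m ...   # rotate every 30 minutes
 *
 * All of them imply timestamp_filename, so each rotation produces a new
 * perf.data.<timestamp> file.
 */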
1515
Namhyung Kime5b2c202014-10-23 00:15:46 +09001516static const char * const __record_usage[] = {
Mike Galbraith9e0967532009-05-28 16:25:34 +02001517 "perf record [<options>] [<command>]",
1518 "perf record [<options>] -- <command> [<options>]",
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001519 NULL
1520};
Namhyung Kime5b2c202014-10-23 00:15:46 +09001521const char * const *record_usage = __record_usage;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001522
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001523/*
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001524 * XXX Ideally would be local to cmd_record() and passed to a record__new
1525 * because we need to have access to it in record__exit, which is called
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001526 * after cmd_record() exits, but since record_options needs to be accessible to
1527 * builtin-script, leave it here.
1528 *
1529 * At least we don't touch it in all the other functions here directly.
1530 *
1531 * Just say no to tons of global variables, sigh.
1532 */
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001533static struct record record = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001534 .opts = {
Andi Kleen8affc2b2014-07-31 14:45:04 +08001535 .sample_time = true,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001536 .mmap_pages = UINT_MAX,
1537 .user_freq = UINT_MAX,
1538 .user_interval = ULLONG_MAX,
Arnaldo Carvalho de Melo447a6012012-05-22 13:14:18 -03001539 .freq = 4000,
Namhyung Kimd1cb9fc2012-05-16 18:45:49 +09001540 .target = {
1541 .uses_mmap = true,
Adrian Hunter3aa59392013-11-15 15:52:29 +02001542 .default_per_cpu = true,
Namhyung Kimd1cb9fc2012-05-16 18:45:49 +09001543 },
Kan Liang9d9cad72015-06-17 09:51:11 -04001544 .proc_map_timeout = 500,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001545 },
Namhyung Kime3d59112015-01-29 17:06:44 +09001546 .tool = {
1547 .sample = process_sample_event,
1548 .fork = perf_event__process_fork,
Adrian Huntercca84822015-08-19 17:29:21 +03001549 .exit = perf_event__process_exit,
Namhyung Kime3d59112015-01-29 17:06:44 +09001550 .comm = perf_event__process_comm,
Hari Bathinif3b36142017-03-08 02:11:43 +05301551 .namespaces = perf_event__process_namespaces,
Namhyung Kime3d59112015-01-29 17:06:44 +09001552 .mmap = perf_event__process_mmap,
1553 .mmap2 = perf_event__process_mmap2,
Adrian Huntercca84822015-08-19 17:29:21 +03001554 .ordered_events = true,
Namhyung Kime3d59112015-01-29 17:06:44 +09001555 },
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001556};
Frederic Weisbecker7865e812010-04-14 19:42:07 +02001557
Namhyung Kim76a26542015-10-22 23:28:32 +09001558const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
1559 "\n\t\t\t\tDefault: fp";
Arnaldo Carvalho de Melo61eaa3b2012-10-01 15:20:58 -03001560
Wang Nan0aab2132016-06-16 08:02:41 +00001561static bool dry_run;
1562
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001563/*
1564 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
1565 * with it and switch to using the library functions in perf_evlist that came
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03001566 * from builtin-record.c, i.e. use record_opts,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001567 * perf_evlist__prepare_workload, etc. instead of fork+exec'ing 'perf record',
1568 * using pipes, etc.
1569 */
Jiri Olsaefd21302017-01-03 09:19:55 +01001570static struct option __record_options[] = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001571 OPT_CALLBACK('e', "event", &record.evlist, "event",
Thomas Gleixner86847b62009-06-06 12:24:17 +02001572 "event selector. use 'perf list' to list available events",
Jiri Olsaf120f9d2011-07-14 11:25:32 +02001573 parse_events_option),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001574 OPT_CALLBACK(0, "filter", &record.evlist, "filter",
Li Zefanc171b552009-10-15 11:22:07 +08001575 "event filter", parse_filter),
Wang Nan4ba1faa2015-07-10 07:36:10 +00001576 OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
1577 NULL, "don't record events from perf itself",
1578 exclude_perf),
Namhyung Kimbea03402012-04-26 14:15:15 +09001579 OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03001580 "record events on existing process id"),
Namhyung Kimbea03402012-04-26 14:15:15 +09001581 OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03001582 "record events on existing thread id"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001583 OPT_INTEGER('r', "realtime", &record.realtime_prio,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001584 "collect data with this RT SCHED_FIFO priority"),
Arnaldo Carvalho de Melo509051e2014-01-14 17:52:14 -03001585 OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
Kirill Smelkovacac03f2011-01-12 17:59:36 +03001586 "collect data without buffering"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001587 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
Frederic Weisbeckerdaac07b2009-08-13 10:27:19 +02001588 "collect raw sample records from all opened counters"),
Namhyung Kimbea03402012-04-26 14:15:15 +09001589 OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001590 "system-wide collection from all CPUs"),
Namhyung Kimbea03402012-04-26 14:15:15 +09001591 OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
Stephane Eranianc45c6ea2010-05-28 12:00:01 +02001592 "list of cpus to monitor"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001593 OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
Jiri Olsaeae8ad82017-01-23 22:25:41 +01001594 OPT_STRING('o', "output", &record.data.file.path, "file",
Ingo Molnarabaff322009-06-02 22:59:57 +02001595 "output file name"),
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02001596 OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
1597 &record.opts.no_inherit_set,
1598 "child tasks do not inherit counters"),
Wang Nan4ea648a2016-07-14 08:34:47 +00001599 OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
1600 "synthesize non-sample events at the end of output"),
Wang Nan626a6b72016-07-14 08:34:45 +00001601 OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
Arnaldo Carvalho de Melob09c2362018-03-01 14:52:50 -03001602 OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
1603 "Fail if the specified frequency can't be used"),
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03001604 OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
1605 "profile at this frequency",
1606 record__parse_freq),
Adrian Huntere9db1312015-04-09 18:53:46 +03001607 OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
1608 "number of mmap data pages and AUX area tracing mmap pages",
1609 record__parse_mmap_pages),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001610 OPT_BOOLEAN(0, "group", &record.opts.group,
Lin Ming43bece72011-08-17 18:42:07 +08001611 "put the counters into a counter group"),
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001612 OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001613 NULL, "enables call-graph recording" ,
1614 &record_callchain_opt),
1615 OPT_CALLBACK(0, "call-graph", &record.opts,
Namhyung Kim76a26542015-10-22 23:28:32 +09001616 "record_mode[,record_size]", record_callchain_help,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001617 &record_parse_callchain_opt),
Ian Munsiec0555642010-04-13 18:37:33 +10001618 OPT_INCR('v', "verbose", &verbose,
Ingo Molnar3da297a2009-06-07 17:39:02 +02001619 "be more verbose (show counter open errors, etc)"),
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02001620 OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001621 OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001622 "per thread counts"),
Peter Zijlstra56100322015-06-10 16:48:50 +02001623 OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
Kan Liang3b0a5da2017-08-29 13:11:08 -04001624 OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
1625 "Record the sample physical addresses"),
Jiri Olsab6f35ed2016-08-01 20:02:35 +02001626 OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
Adrian Hunter3abebc52015-07-06 14:51:01 +03001627 OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
1628 &record.opts.sample_time_set,
1629 "Record the sample timestamps"),
Jiri Olsaf290aa12018-02-01 09:38:11 +01001630 OPT_BOOLEAN_SET('P', "period", &record.opts.period, &record.opts.period_set,
1631 "Record the sample period"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001632 OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001633 "don't sample"),
Wang Nand2db9a92016-01-25 09:56:19 +00001634 OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
1635 &record.no_buildid_cache_set,
1636 "do not update the buildid cache"),
1637 OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
1638 &record.no_buildid_set,
1639 "do not collect buildids in perf.data"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001640 OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
Stephane Eranian023695d2011-02-14 11:20:01 +02001641 "monitor event in cgroup name only",
1642 parse_cgroups),
Arnaldo Carvalho de Meloa6205a32014-01-14 17:58:12 -03001643 OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
Andi Kleen6619a532014-01-11 13:38:27 -08001644 "ms to wait before starting measurement after program start"),
Namhyung Kimbea03402012-04-26 14:15:15 +09001645 OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
1646 "user to profile"),
Stephane Eraniana5aabda2012-03-08 23:47:45 +01001647
1648 OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
1649 "branch any", "sample any taken branches",
1650 parse_branch_stack),
1651
1652 OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
1653 "branch filter mask", "branch stack filter modes",
Roberto Agostino Vitillobdfebd82012-02-09 23:21:02 +01001654 parse_branch_stack),
Andi Kleen05484292013-01-24 16:10:29 +01001655 OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
1656 "sample by weight (on special events only)"),
Andi Kleen475eeab2013-09-20 07:40:43 -07001657 OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
1658 "sample transaction flags (special events only)"),
Adrian Hunter3aa59392013-11-15 15:52:29 +02001659 OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
1660 "use per-thread mmaps"),
Stephane Eranianbcc84ec2015-08-31 18:41:12 +02001661 OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
1662 "sample selected machine registers on interrupt,"
1663 " use -I ? to list register names", parse_regs),
Andi Kleen84c41742017-09-05 10:00:28 -07001664 OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
1665 "sample selected machine registers on interrupt,"
1666 " use -I ? to list register names", parse_regs),
Andi Kleen85c273d2015-02-24 15:13:40 -08001667 OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
1668 "Record running/enabled time of read (:S) events"),
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001669 OPT_CALLBACK('k', "clockid", &record.opts,
1670 "clockid", "clockid to use for events, see clock_gettime()",
1671 parse_clockid),
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001672 OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
1673 "opts", "AUX area tracing Snapshot Mode", ""),
Kan Liang9d9cad72015-06-17 09:51:11 -04001674 OPT_UINTEGER(0, "proc-map-timeout", &record.opts.proc_map_timeout,
1675 "per thread proc mmap processing timeout in ms"),
Hari Bathinif3b36142017-03-08 02:11:43 +05301676 OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
1677 "Record namespaces events"),
Adrian Hunterb757bb02015-07-21 12:44:04 +03001678 OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
1679 "Record context switch events"),
Jiri Olsa85723882016-02-15 09:34:31 +01001680 OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
1681 "Configure all used events to run in kernel space.",
1682 PARSE_OPT_EXCLUSIVE),
1683 OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
1684 "Configure all used events to run in user space.",
1685 PARSE_OPT_EXCLUSIVE),
Wang Nan71dc23262015-10-14 12:41:19 +00001686 OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
1687 "clang binary to use for compiling BPF scriptlets"),
1688 OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
1689 "options passed to clang when compiling BPF scriptlets"),
He Kuang7efe0e02015-12-14 10:39:23 +00001690 OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
1691 "file", "vmlinux pathname"),
Namhyung Kim61566812016-01-11 22:37:09 +09001692 OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
1693 "Record build-id of all DSOs regardless of hits"),
Wang Nanecfd7a92016-04-13 08:21:07 +00001694 OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
1695 "append timestamp to output filename"),
Jin Yao68588ba2017-12-08 21:13:42 +08001696 OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
1697 "Record timestamp boundary (time of first/last samples)"),
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001698 OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
Jiri Olsabfacbe32017-01-09 10:52:00 +01001699 &record.switch_output.set, "signal,size,time",
1700 "Switch output when receive SIGUSR2 or cross size,time threshold",
Jiri Olsadc0c6122017-01-09 10:51:58 +01001701 "signal"),
Wang Nan0aab2132016-06-16 08:02:41 +00001702 OPT_BOOLEAN(0, "dry-run", &dry_run,
1703 "Parse options then exit"),
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001704 OPT_END()
1705};
1706
Namhyung Kime5b2c202014-10-23 00:15:46 +09001707struct option *record_options = __record_options;
1708
Arnaldo Carvalho de Melob0ad8ea2017-03-27 11:47:20 -03001709int cmd_record(int argc, const char **argv)
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001710{
Adrian Hunteref149c22015-04-09 18:53:45 +03001711 int err;
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001712 struct record *rec = &record;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001713 char errbuf[BUFSIZ];
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001714
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03001715 setlocale(LC_ALL, "");
1716
Wang Nan48e1cab2015-12-14 10:39:22 +00001717#ifndef HAVE_LIBBPF_SUPPORT
1718# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
1719 set_nobuild('\0', "clang-path", true);
1720 set_nobuild('\0', "clang-opt", true);
1721# undef set_nobuild
1722#endif
1723
He Kuang7efe0e02015-12-14 10:39:23 +00001724#ifndef HAVE_BPF_PROLOGUE
1725# if !defined (HAVE_DWARF_SUPPORT)
1726# define REASON "NO_DWARF=1"
1727# elif !defined (HAVE_LIBBPF_SUPPORT)
1728# define REASON "NO_LIBBPF=1"
1729# else
1730# define REASON "this architecture doesn't support BPF prologue"
1731# endif
1732# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
1733 set_nobuild('\0', "vmlinux", true);
1734# undef set_nobuild
1735# undef REASON
1736#endif
1737
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001738 rec->evlist = perf_evlist__new();
1739 if (rec->evlist == NULL)
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -02001740 return -ENOMEM;
1741
Arnaldo Carvalho de Meloecc4c562017-01-24 13:44:10 -03001742 err = perf_config(perf_record_config, rec);
1743 if (err)
1744 return err;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001745
Tom Zanussibca647a2010-11-10 08:11:30 -06001746 argc = parse_options(argc, argv, record_options, record_usage,
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02001747 PARSE_OPT_STOP_AT_NON_OPTION);
Namhyung Kim68ba3232017-02-17 17:17:42 +09001748 if (quiet)
1749 perf_quiet_option();
Jiri Olsa483635a2017-02-17 18:00:18 +01001750
1751 /* Make system wide (-a) the default target. */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001752 if (!argc && target__none(&rec->opts.target))
Jiri Olsa483635a2017-02-17 18:00:18 +01001753 rec->opts.target.system_wide = true;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001754
Namhyung Kimbea03402012-04-26 14:15:15 +09001755 if (nr_cgroups && !rec->opts.target.system_wide) {
Namhyung Kimc7118362015-10-25 00:49:27 +09001756 usage_with_options_msg(record_usage, record_options,
1757 "cgroup monitoring only available in system-wide mode");
1758
Stephane Eranian023695d2011-02-14 11:20:01 +02001759 }
Adrian Hunterb757bb02015-07-21 12:44:04 +03001760 if (rec->opts.record_switch_events &&
1761 !perf_can_record_switch_events()) {
Namhyung Kimc7118362015-10-25 00:49:27 +09001762 ui__error("kernel does not support recording context switch events\n");
1763 parse_options_usage(record_usage, record_options, "switch-events", 0);
1764 return -EINVAL;
Adrian Hunterb757bb02015-07-21 12:44:04 +03001765 }
Stephane Eranian023695d2011-02-14 11:20:01 +02001766
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001767 if (switch_output_setup(rec)) {
1768 parse_options_usage(record_usage, record_options, "switch-output", 0);
1769 return -EINVAL;
1770 }
1771
Jiri Olsabfacbe32017-01-09 10:52:00 +01001772 if (rec->switch_output.time) {
1773 signal(SIGALRM, alarm_sig_handler);
1774 alarm(rec->switch_output.time);
1775 }
1776
Adrian Hunter1b36c032016-09-23 17:38:39 +03001777 /*
1778 * Allow aliases to facilitate the lookup of symbols for address
1779 * filters. Refer to auxtrace_parse_filters().
1780 */
1781 symbol_conf.allow_aliases = true;
1782
1783 symbol__init(NULL);
1784
Adrian Hunter4b5ea3b2018-03-06 11:13:12 +02001785 err = record__auxtrace_init(rec);
Adrian Hunter1b36c032016-09-23 17:38:39 +03001786 if (err)
1787 goto out;
1788
Wang Nan0aab2132016-06-16 08:02:41 +00001789 if (dry_run)
Adrian Hunter5c01ad602016-09-23 17:38:37 +03001790 goto out;
Wang Nan0aab2132016-06-16 08:02:41 +00001791
Wang Nand7888572016-04-08 15:07:24 +00001792 err = bpf__setup_stdout(rec->evlist);
1793 if (err) {
1794 bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
1795 pr_err("ERROR: Setup BPF stdout failed: %s\n",
1796 errbuf);
Adrian Hunter5c01ad602016-09-23 17:38:37 +03001797 goto out;
Wang Nand7888572016-04-08 15:07:24 +00001798 }
1799
Adrian Hunteref149c22015-04-09 18:53:45 +03001800 err = -ENOMEM;
1801
Arnaldo Carvalho de Melo6c443952017-11-14 11:03:19 -03001802 if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(rec->evlist))
Arnaldo Carvalho de Melo646aaea2011-05-27 11:00:41 -03001803 pr_warning(
1804"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
1805"check /proc/sys/kernel/kptr_restrict.\n\n"
1806"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
1807"file is not found in the buildid cache or in the vmlinux path.\n\n"
1808"Samples in kernel modules won't be resolved at all.\n\n"
1809"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
1810"even with a suitable vmlinux or kallsyms file.\n\n");
Arnaldo Carvalho de Meloec80fde2011-05-26 09:53:51 -03001811
Wang Nan0c1d46a2016-04-20 18:59:52 +00001812 if (rec->no_buildid_cache || rec->no_buildid) {
Stephane Eraniana1ac1d32010-06-17 11:39:01 +02001813 disable_buildid_cache();
Jiri Olsadc0c6122017-01-09 10:51:58 +01001814 } else if (rec->switch_output.enabled) {
Wang Nan0c1d46a2016-04-20 18:59:52 +00001815 /*
1816 * In 'perf record --switch-output', disable buildid
1817 * generation by default to reduce data file switching
1818 * overhead. Still generate buildids if they are explicitly
1819 * requested using
1820 *
Jiri Olsa60437ac2017-01-03 09:19:56 +01001821 * perf record --switch-output --no-no-buildid \
Wang Nan0c1d46a2016-04-20 18:59:52 +00001822 * --no-no-buildid-cache
1823 *
1824 * Following code equals to:
1825 *
1826 * if ((rec->no_buildid || !rec->no_buildid_set) &&
1827 * (rec->no_buildid_cache || !rec->no_buildid_cache_set))
1828 * disable_buildid_cache();
1829 */
1830 bool disable = true;
1831
1832 if (rec->no_buildid_set && !rec->no_buildid)
1833 disable = false;
1834 if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
1835 disable = false;
1836 if (disable) {
1837 rec->no_buildid = true;
1838 rec->no_buildid_cache = true;
1839 disable_buildid_cache();
1840 }
1841 }
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02001842
Wang Nan4ea648a2016-07-14 08:34:47 +00001843 if (record.opts.overwrite)
1844 record.opts.tail_synthesize = true;
1845
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001846 if (rec->evlist->nr_entries == 0 &&
Arnaldo Carvalho de Melo4b4cd502017-07-03 13:26:32 -03001847 __perf_evlist__add_default(rec->evlist, !record.opts.no_samples) < 0) {
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02001848 pr_err("Not enough memory for event selector list\n");
Adrian Hunter394c01e2016-09-23 17:38:36 +03001849 goto out;
Peter Zijlstrabbd36e52009-06-11 23:11:50 +02001850 }
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001851
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02001852 if (rec->opts.target.tid && !rec->opts.no_inherit_set)
1853 rec->opts.no_inherit = true;
1854
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001855 err = target__validate(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001856 if (err) {
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001857 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Jiri Olsac3dec272018-02-06 19:17:58 +01001858 ui__warning("%s\n", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001859 }
Namhyung Kim4bd0f2d2012-04-26 14:15:18 +09001860
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001861 err = target__parse_uid(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001862 if (err) {
1863 int saved_errno = errno;
1864
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001865 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Namhyung Kim3780f482012-05-29 13:22:57 +09001866 ui__error("%s", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001867
1868 err = -saved_errno;
Adrian Hunter394c01e2016-09-23 17:38:36 +03001869 goto out;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001870 }
Arnaldo Carvalho de Melo0d37aa32012-01-19 14:08:15 -02001871
Mengting Zhangca800062017-12-13 15:01:53 +08001872 /* Enable ignoring missing threads when the -u/-p option is specified. */
1873 rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;
Jiri Olsa23dc4f12016-12-12 11:35:43 +01001874
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001875 err = -ENOMEM;
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001876 if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
Arnaldo Carvalho de Melodd7927f2011-01-12 14:28:51 -02001877 usage_with_options(record_usage, record_options);
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02001878
Adrian Hunteref149c22015-04-09 18:53:45 +03001879 err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
1880 if (err)
Adrian Hunter394c01e2016-09-23 17:38:36 +03001881 goto out;
Adrian Hunteref149c22015-04-09 18:53:45 +03001882
Namhyung Kim61566812016-01-11 22:37:09 +09001883 /*
1884 * We take all buildids when the file contains
1885 * AUX area tracing data because we do not decode the
1886 * trace, as that would take too long.
1887 */
1888 if (rec->opts.full_auxtrace)
1889 rec->buildid_all = true;
1890
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03001891 if (record_opts__config(&rec->opts)) {
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001892 err = -EINVAL;
Adrian Hunter394c01e2016-09-23 17:38:36 +03001893 goto out;
Mike Galbraith7e4ff9e2009-10-12 07:56:03 +02001894 }
1895
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001896 err = __cmd_record(&record, argc, argv);
Adrian Hunter394c01e2016-09-23 17:38:36 +03001897out:
Namhyung Kim45604712014-05-12 09:47:24 +09001898 perf_evlist__delete(rec->evlist);
Arnaldo Carvalho de Melod65a4582010-07-30 18:31:28 -03001899 symbol__exit();
Adrian Hunteref149c22015-04-09 18:53:45 +03001900 auxtrace_record__free(rec->itr);
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001901 return err;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001902}
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001903
1904static void snapshot_sig_handler(int sig __maybe_unused)
1905{
Jiri Olsadc0c6122017-01-09 10:51:58 +01001906 struct record *rec = &record;
1907
Wang Nan5f9cf592016-04-20 18:59:49 +00001908 if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
1909 trigger_hit(&auxtrace_snapshot_trigger);
1910 auxtrace_record__snapshot_started = 1;
1911 if (auxtrace_record__snapshot_start(record.itr))
1912 trigger_error(&auxtrace_snapshot_trigger);
1913 }
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001914
Jiri Olsadc0c6122017-01-09 10:51:58 +01001915 if (switch_output_signal(rec))
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001916 trigger_hit(&switch_output_trigger);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001917}
Jiri Olsabfacbe32017-01-09 10:52:00 +01001918
1919static void alarm_sig_handler(int sig __maybe_unused)
1920{
1921 struct record *rec = &record;
1922
1923 if (switch_output_time(rec))
1924 trigger_hit(&switch_output_trigger);
1925}