// SPDX-License-Identifier: GPL-2.0
/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "perf.h"

#include "util/build-id.h"
#include "util/util.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/config.h"

#include "util/callchain.h"
#include "util/cgroup.h"
#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/drv_configs.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"
#include "util/perf_regs.h"
#include "util/auxtrace.h"
#include "util/tsc.h"
#include "util/parse-branch-options.h"
#include "util/parse-regs-options.h"
#include "util/llvm-utils.h"
#include "util/bpf-loader.h"
#include "util/trigger.h"
#include "util/perf-hooks.h"
#include "util/time-utils.h"
#include "util/units.h"
#include "asm/bug.h"

#include <errno.h>
#include <inttypes.h>
#include <locale.h>
#include <poll.h>
#include <unistd.h>
#include <sched.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <linux/time64.h>

struct switch_output {
        bool             enabled;
        bool             signal;
        unsigned long    size;
        unsigned long    time;
        const char      *str;
        bool             set;
};

struct record {
        struct perf_tool        tool;
        struct record_opts      opts;
        u64                     bytes_written;
        struct perf_data        data;
        struct auxtrace_record  *itr;
        struct perf_evlist      *evlist;
        struct perf_session     *session;
        int                     realtime_prio;
        bool                    no_buildid;
        bool                    no_buildid_set;
        bool                    no_buildid_cache;
        bool                    no_buildid_cache_set;
        bool                    buildid_all;
        bool                    timestamp_filename;
        bool                    timestamp_boundary;
        struct switch_output    switch_output;
        unsigned long long      samples;
};

static volatile int auxtrace_record__snapshot_started;
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
static DEFINE_TRIGGER(switch_output_trigger);

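/*
 * The switch_output_*() helpers tell the main loop whether the
 * --switch-output condition (signal, size or time based) is armed and,
 * for the size variant, whether enough data has been written to rotate
 * the output file.
 */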
static bool switch_output_signal(struct record *rec)
{
        return rec->switch_output.signal &&
               trigger_is_ready(&switch_output_trigger);
}

static bool switch_output_size(struct record *rec)
{
        return rec->switch_output.size &&
               trigger_is_ready(&switch_output_trigger) &&
               (rec->bytes_written >= rec->switch_output.size);
}

static bool switch_output_time(struct record *rec)
{
        return rec->switch_output.time &&
               trigger_is_ready(&switch_output_trigger);
}

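/*
 * Common synchronous write path: append @size bytes to the output file,
 * account them in rec->bytes_written and fire the switch-output trigger
 * once the configured size threshold is crossed.
 */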
static int record__write(struct record *rec, struct perf_mmap *map __maybe_unused,
                         void *bf, size_t size)
{
        struct perf_data_file *file = &rec->session->data->file;

        if (perf_data_file__write(file, bf, size) < 0) {
                pr_err("failed to write perf data, error: %m\n");
                return -1;
        }

        rec->bytes_written += size;

        if (switch_output_size(rec))
                trigger_hit(&switch_output_trigger);

        return 0;
}

#ifdef HAVE_AIO_SUPPORT
static int record__aio_write(struct aiocb *cblock, int trace_fd,
                void *buf, size_t size, off_t off)
{
        int rc;

        cblock->aio_fildes = trace_fd;
        cblock->aio_buf    = buf;
        cblock->aio_nbytes = size;
        cblock->aio_offset = off;
        cblock->aio_sigevent.sigev_notify = SIGEV_NONE;

        do {
                rc = aio_write(cblock);
                if (rc == 0) {
                        break;
                } else if (errno != EAGAIN) {
                        cblock->aio_fildes = -1;
                        pr_err("failed to queue perf data, error: %m\n");
                        break;
                }
        } while (1);

        return rc;
}

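/*
 * Check the state of one queued aio write: return 0 while it is still in
 * flight or when a partially written remainder had to be re-queued, and
 * return 1 once the whole chunk is on disk and the mmap reference taken
 * in perf_mmap__push() has been dropped.
 */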
static int record__aio_complete(struct perf_mmap *md, struct aiocb *cblock)
{
        void *rem_buf;
        off_t rem_off;
        size_t rem_size;
        int rc, aio_errno;
        ssize_t aio_ret, written;

        aio_errno = aio_error(cblock);
        if (aio_errno == EINPROGRESS)
                return 0;

        written = aio_ret = aio_return(cblock);
        if (aio_ret < 0) {
                if (aio_errno != EINTR)
                        pr_err("failed to write perf data, error: %m\n");
                written = 0;
        }

        rem_size = cblock->aio_nbytes - written;

        if (rem_size == 0) {
                cblock->aio_fildes = -1;
                /*
                 * md->refcount is incremented in perf_mmap__push() for
                 * every enqueued aio write request so decrement it because
                 * the request is now complete.
                 */
                perf_mmap__put(md);
                rc = 1;
        } else {
                /*
                 * The aio write request may need to be restarted with the
                 * remainder if the kernel didn't write the whole chunk
                 * at once.
                 */
                rem_off = cblock->aio_offset + written;
                rem_buf = (void *)(cblock->aio_buf + written);
                record__aio_write(cblock, cblock->aio_fildes,
                                rem_buf, rem_size, rem_off);
                rc = 0;
        }

        return rc;
}

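/*
 * Wait until the aio write queued on this mmap has completed (or its
 * remainder has been re-queued), retrying aio_suspend() on EAGAIN and
 * EINTR.
 */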
static void record__aio_sync(struct perf_mmap *md)
{
        struct aiocb *cblock = &md->aio.cblock;
        struct timespec timeout = { 0, 1000 * 1000 * 1 }; /* 1ms */

        do {
                if (cblock->aio_fildes == -1 || record__aio_complete(md, cblock))
                        return;

                while (aio_suspend((const struct aiocb**)&cblock, 1, &timeout)) {
                        if (!(errno == EAGAIN || errno == EINTR))
                                pr_err("failed to sync perf data, error: %m\n");
                }
        } while (1);
}

static int record__aio_pushfn(void *to, struct aiocb *cblock, void *bf, size_t size, off_t off)
{
        struct record *rec = to;
        int ret, trace_fd = rec->session->data->file.fd;

        rec->samples++;

        ret = record__aio_write(cblock, trace_fd, bf, size, off);
        if (!ret) {
                rec->bytes_written += size;
                if (switch_output_size(rec))
                        trigger_hit(&switch_output_trigger);
        }

        return ret;
}

static off_t record__aio_get_pos(int trace_fd)
{
        return lseek(trace_fd, 0, SEEK_CUR);
}

static void record__aio_set_pos(int trace_fd, off_t pos)
{
        lseek(trace_fd, pos, SEEK_SET);
}

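/*
 * Drain every outstanding aio write before the output file is switched
 * or finalized; a no-op when aio is not enabled (nr_cblocks == 0).
 */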
static void record__aio_mmap_read_sync(struct record *rec)
{
        int i;
        struct perf_evlist *evlist = rec->evlist;
        struct perf_mmap *maps = evlist->mmap;

        if (!rec->opts.nr_cblocks)
                return;

        for (i = 0; i < evlist->nr_mmaps; i++) {
                struct perf_mmap *map = &maps[i];

                if (map->base)
                        record__aio_sync(map);
        }
}

static int nr_cblocks_default = 1;

static int record__aio_parse(const struct option *opt,
                             const char *str __maybe_unused,
                             int unset)
{
        struct record_opts *opts = (struct record_opts *)opt->value;

        if (unset)
                opts->nr_cblocks = 0;
        else
                opts->nr_cblocks = nr_cblocks_default;

        return 0;
}
#else /* HAVE_AIO_SUPPORT */
static void record__aio_sync(struct perf_mmap *md __maybe_unused)
{
}

static int record__aio_pushfn(void *to __maybe_unused, struct aiocb *cblock __maybe_unused,
                void *bf __maybe_unused, size_t size __maybe_unused, off_t off __maybe_unused)
{
        return -1;
}

static off_t record__aio_get_pos(int trace_fd __maybe_unused)
{
        return -1;
}

static void record__aio_set_pos(int trace_fd __maybe_unused, off_t pos __maybe_unused)
{
}

static void record__aio_mmap_read_sync(struct record *rec __maybe_unused)
{
}
#endif

static int record__aio_enabled(struct record *rec)
{
        return rec->opts.nr_cblocks > 0;
}

static int process_synthesized_event(struct perf_tool *tool,
                                     union perf_event *event,
                                     struct perf_sample *sample __maybe_unused,
                                     struct machine *machine __maybe_unused)
{
        struct record *rec = container_of(tool, struct record, tool);
        return record__write(rec, NULL, event, event->header.size);
}

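/* perf_mmap__push() callback for the regular, synchronous write path. */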
static int record__pushfn(struct perf_mmap *map, void *to, void *bf, size_t size)
{
        struct record *rec = to;

        rec->samples++;
        return record__write(rec, map, bf, size);
}

static volatile int done;
static volatile int signr = -1;
static volatile int child_finished;

static void sig_handler(int sig)
{
        if (sig == SIGCHLD)
                child_finished = 1;
        else
                signr = sig;

        done = 1;
}

static void sigsegv_handler(int sig)
{
        perf_hooks__recover();
        sighandler_dump_stack(sig);
}

static void record__sig_exit(void)
{
        if (signr == -1)
                return;

        signal(signr, SIG_DFL);
        raise(signr);
}

#ifdef HAVE_AUXTRACE_SUPPORT

static int record__process_auxtrace(struct perf_tool *tool,
                                    struct perf_mmap *map,
                                    union perf_event *event, void *data1,
                                    size_t len1, void *data2, size_t len2)
{
        struct record *rec = container_of(tool, struct record, tool);
        struct perf_data *data = &rec->data;
        size_t padding;
        u8 pad[8] = {0};

        if (!perf_data__is_pipe(data)) {
                off_t file_offset;
                int fd = perf_data__fd(data);
                int err;

                file_offset = lseek(fd, 0, SEEK_CUR);
                if (file_offset == -1)
                        return -1;
                err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
                                                     event, file_offset);
                if (err)
                        return err;
        }

        /* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
        padding = (len1 + len2) & 7;
        if (padding)
                padding = 8 - padding;

        record__write(rec, map, event, event->header.size);
        record__write(rec, map, data1, len1);
        if (len2)
                record__write(rec, map, data2, len2);
        record__write(rec, map, &pad, padding);

        return 0;
}

static int record__auxtrace_mmap_read(struct record *rec,
                                      struct perf_mmap *map)
{
        int ret;

        ret = auxtrace_mmap__read(map, rec->itr, &rec->tool,
                                  record__process_auxtrace);
        if (ret < 0)
                return ret;

        if (ret)
                rec->samples++;

        return 0;
}

static int record__auxtrace_mmap_read_snapshot(struct record *rec,
                                               struct perf_mmap *map)
{
        int ret;

        ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool,
                                           record__process_auxtrace,
                                           rec->opts.auxtrace_snapshot_size);
        if (ret < 0)
                return ret;

        if (ret)
                rec->samples++;

        return 0;
}

static int record__auxtrace_read_snapshot_all(struct record *rec)
{
        int i;
        int rc = 0;

        for (i = 0; i < rec->evlist->nr_mmaps; i++) {
                struct perf_mmap *map = &rec->evlist->mmap[i];

                if (!map->auxtrace_mmap.base)
                        continue;

                if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) {
                        rc = -1;
                        goto out;
                }
        }
out:
        return rc;
}

static void record__read_auxtrace_snapshot(struct record *rec)
{
        pr_debug("Recording AUX area tracing snapshot\n");
        if (record__auxtrace_read_snapshot_all(rec) < 0) {
                trigger_error(&auxtrace_snapshot_trigger);
        } else {
                if (auxtrace_record__snapshot_finish(rec->itr))
                        trigger_error(&auxtrace_snapshot_trigger);
                else
                        trigger_ready(&auxtrace_snapshot_trigger);
        }
}

static int record__auxtrace_init(struct record *rec)
{
        int err;

        if (!rec->itr) {
                rec->itr = auxtrace_record__init(rec->evlist, &err);
                if (err)
                        return err;
        }

        err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
                                              rec->opts.auxtrace_snapshot_opts);
        if (err)
                return err;

        return auxtrace_parse_filters(rec->evlist);
}

#else

static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
                               struct perf_mmap *map __maybe_unused)
{
        return 0;
}

static inline
void record__read_auxtrace_snapshot(struct record *rec __maybe_unused)
{
}

static inline
int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
{
        return 0;
}

static int record__auxtrace_init(struct record *rec __maybe_unused)
{
        return 0;
}

#endif

static int record__mmap_evlist(struct record *rec,
                               struct perf_evlist *evlist)
{
        struct record_opts *opts = &rec->opts;
        char msg[512];

        if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
                                 opts->auxtrace_mmap_pages,
                                 opts->auxtrace_snapshot_mode, opts->nr_cblocks) < 0) {
                if (errno == EPERM) {
                        pr_err("Permission error mapping pages.\n"
                               "Consider increasing "
                               "/proc/sys/kernel/perf_event_mlock_kb,\n"
                               "or try again with a smaller value of -m/--mmap_pages.\n"
                               "(current value: %u,%u)\n",
                               opts->mmap_pages, opts->auxtrace_mmap_pages);
                        return -errno;
                } else {
                        pr_err("failed to mmap with %d (%s)\n", errno,
                                str_error_r(errno, msg, sizeof(msg)));
                        if (errno)
                                return -errno;
                        else
                                return -EINVAL;
                }
        }
        return 0;
}

static int record__mmap(struct record *rec)
{
        return record__mmap_evlist(rec, rec->evlist);
}

static int record__open(struct record *rec)
{
        char msg[BUFSIZ];
        struct perf_evsel *pos;
        struct perf_evlist *evlist = rec->evlist;
        struct perf_session *session = rec->session;
        struct record_opts *opts = &rec->opts;
        struct perf_evsel_config_term *err_term;
        int rc = 0;

        /*
         * For initial_delay we need to add a dummy event so that we can track
         * PERF_RECORD_MMAP while we wait for the initial delay to enable the
         * real events, the ones asked by the user.
         */
        if (opts->initial_delay) {
                if (perf_evlist__add_dummy(evlist))
                        return -ENOMEM;

                pos = perf_evlist__first(evlist);
                pos->tracking = 0;
                pos = perf_evlist__last(evlist);
                pos->tracking = 1;
                pos->attr.enable_on_exec = 1;
        }

        perf_evlist__config(evlist, opts, &callchain_param);

        evlist__for_each_entry(evlist, pos) {
try_again:
                if (perf_evsel__open(pos, pos->cpus, pos->threads) < 0) {
                        if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
                                if (verbose > 0)
                                        ui__warning("%s\n", msg);
                                goto try_again;
                        }
                        if ((errno == EINVAL || errno == EBADF) &&
                            pos->leader != pos &&
                            pos->weak_group) {
                                pos = perf_evlist__reset_weak_group(evlist, pos);
                                goto try_again;
                        }
                        rc = -errno;
                        perf_evsel__open_strerror(pos, &opts->target,
                                                  errno, msg, sizeof(msg));
                        ui__error("%s\n", msg);
                        goto out;
                }

                pos->supported = true;
        }

        if (perf_evlist__apply_filters(evlist, &pos)) {
                pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
                        pos->filter, perf_evsel__name(pos), errno,
                        str_error_r(errno, msg, sizeof(msg)));
                rc = -1;
                goto out;
        }

        if (perf_evlist__apply_drv_configs(evlist, &pos, &err_term)) {
                pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
                      err_term->val.drv_cfg, perf_evsel__name(pos), errno,
                      str_error_r(errno, msg, sizeof(msg)));
                rc = -1;
                goto out;
        }

        rc = record__mmap(rec);
        if (rc)
                goto out;

        session->evlist = evlist;
        perf_session__set_id_hdr_size(session);
out:
        return rc;
}

static int process_sample_event(struct perf_tool *tool,
                                union perf_event *event,
                                struct perf_sample *sample,
                                struct perf_evsel *evsel,
                                struct machine *machine)
{
        struct record *rec = container_of(tool, struct record, tool);

        if (rec->evlist->first_sample_time == 0)
                rec->evlist->first_sample_time = sample->time;

        rec->evlist->last_sample_time = sample->time;

        if (rec->buildid_all)
                return 0;

        rec->samples++;
        return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
}

static int process_buildids(struct record *rec)
{
        struct perf_data *data = &rec->data;
        struct perf_session *session = rec->session;

        if (data->size == 0)
                return 0;

        /*
         * During this process, it'll load kernel map and replace the
         * dso->long_name to a real pathname it found.  In this case
         * we prefer the vmlinux path like
         *   /lib/modules/3.16.4/build/vmlinux
         *
         * rather than build-id path (in debug directory).
         *   $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
         */
        symbol_conf.ignore_vmlinux_buildid = true;

        /*
         * If --buildid-all is given, it marks all DSOs regardless of hits,
         * so there is no need to process samples. But if timestamp_boundary
         * is enabled, we still need to walk through all samples to get the
         * timestamps of the first/last samples.
         */
        if (rec->buildid_all && !rec->timestamp_boundary)
                rec->tool.sample = NULL;

        return perf_session__process_events(session);
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
        int err;
        struct perf_tool *tool = data;
        /*
         * As for the guest kernel, when processing the record & report
         * subcommands we arrange the module mmaps prior to the guest kernel
         * mmap and trigger a dso preload, because by default guest module
         * symbols are loaded from guest kallsyms instead of
         * /lib/modules/XXX/XXX. This method is used to avoid missing symbols
         * when the first address is in a module instead of in the guest
         * kernel.
         */
        err = perf_event__synthesize_modules(tool, process_synthesized_event,
                                             machine);
        if (err < 0)
                pr_err("Couldn't record guest kernel [%d]'s reference"
                       " relocation symbol.\n", machine->pid);

        /*
         * We use _stext for the guest kernel because the guest kernel's
         * /proc/kallsyms sometimes has no _text.
         */
        err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
                                                 machine);
        if (err < 0)
                pr_err("Couldn't record guest kernel [%d]'s reference"
                       " relocation symbol.\n", machine->pid);
}

static struct perf_event_header finished_round_event = {
        .size = sizeof(struct perf_event_header),
        .type = PERF_RECORD_FINISHED_ROUND,
};

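/*
 * Push the data accumulated in every mmap of @evlist to the output, using
 * either the synchronous or the aio path, read any AUX area data, and emit
 * a PERF_RECORD_FINISHED_ROUND if anything was written in this pass.
 */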
static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
                                    bool overwrite)
{
        u64 bytes_written = rec->bytes_written;
        int i;
        int rc = 0;
        struct perf_mmap *maps;
        int trace_fd = rec->data.file.fd;
        off_t off;

        if (!evlist)
                return 0;

        maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
        if (!maps)
                return 0;

        if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
                return 0;

        if (record__aio_enabled(rec))
                off = record__aio_get_pos(trace_fd);

        for (i = 0; i < evlist->nr_mmaps; i++) {
                struct perf_mmap *map = &maps[i];

                if (map->base) {
                        if (!record__aio_enabled(rec)) {
                                if (perf_mmap__push(map, rec, record__pushfn) != 0) {
                                        rc = -1;
                                        goto out;
                                }
                        } else {
                                /*
                                 * Call record__aio_sync() to wait till map->data buffer
                                 * becomes available after previous aio write request.
                                 */
                                record__aio_sync(map);
                                if (perf_mmap__aio_push(map, rec, record__aio_pushfn, &off) != 0) {
                                        record__aio_set_pos(trace_fd, off);
                                        rc = -1;
                                        goto out;
                                }
                        }
                }

                if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
                    record__auxtrace_mmap_read(rec, map) != 0) {
                        rc = -1;
                        goto out;
                }
        }

        if (record__aio_enabled(rec))
                record__aio_set_pos(trace_fd, off);

        /*
         * Mark the round finished in case we wrote
         * at least one event.
         */
        if (bytes_written != rec->bytes_written)
                rc = record__write(rec, NULL, &finished_round_event, sizeof(finished_round_event));

        if (overwrite)
                perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
out:
        return rc;
}

static int record__mmap_read_all(struct record *rec)
{
        int err;

        err = record__mmap_read_evlist(rec, rec->evlist, false);
        if (err)
                return err;

        return record__mmap_read_evlist(rec, rec->evlist, true);
}

static void record__init_features(struct record *rec)
{
        struct perf_session *session = rec->session;
        int feat;

        for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
                perf_header__set_feat(&session->header, feat);

        if (rec->no_buildid)
                perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

        if (!have_tracepoints(&rec->evlist->entries))
                perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

        if (!rec->opts.branch_stack)
                perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);

        if (!rec->opts.full_auxtrace)
                perf_header__clear_feat(&session->header, HEADER_AUXTRACE);

        if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
                perf_header__clear_feat(&session->header, HEADER_CLOCKID);

        perf_header__clear_feat(&session->header, HEADER_STAT);
}

static void
record__finish_output(struct record *rec)
{
        struct perf_data *data = &rec->data;
        int fd = perf_data__fd(data);

        if (data->is_pipe)
                return;

        rec->session->header.data_size += rec->bytes_written;
        data->size = lseek(perf_data__fd(data), 0, SEEK_CUR);

        if (!rec->no_buildid) {
                process_buildids(rec);

                if (rec->buildid_all)
                        dsos__hit_all(rec->session);
        }
        perf_session__write_header(rec->session, rec->evlist, fd, true);

        return;
}

static int record__synthesize_workload(struct record *rec, bool tail)
{
        int err;
        struct thread_map *thread_map;

        if (rec->opts.tail_synthesize != tail)
                return 0;

        thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
        if (thread_map == NULL)
                return -1;

        err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
                                                 process_synthesized_event,
                                                 &rec->session->machines.host,
                                                 rec->opts.sample_address,
                                                 rec->opts.proc_map_timeout);
        thread_map__put(thread_map);
        return err;
}

static int record__synthesize(struct record *rec, bool tail);

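/*
 * Finalize the current output file and switch to a new, timestamped
 * perf.data.<timestamp> file, resynthesizing the side-band events the new
 * file needs to be parsable on its own.
 */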
static int
record__switch_output(struct record *rec, bool at_exit)
{
        struct perf_data *data = &rec->data;
        int fd, err;

        /* Same size: "2015122520103046" */
        char timestamp[] = "InvalidTimestamp";

        record__aio_mmap_read_sync(rec);

        record__synthesize(rec, true);
        if (target__none(&rec->opts.target))
                record__synthesize_workload(rec, true);

        rec->samples = 0;
        record__finish_output(rec);
        err = fetch_current_timestamp(timestamp, sizeof(timestamp));
        if (err) {
                pr_err("Failed to get current timestamp\n");
                return -EINVAL;
        }

        fd = perf_data__switch(data, timestamp,
                                    rec->session->header.data_offset,
                                    at_exit);
        if (fd >= 0 && !at_exit) {
                rec->bytes_written = 0;
                rec->session->header.data_size = 0;
        }

        if (!quiet)
                fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
                        data->file.path, timestamp);

        /* Output tracking events */
        if (!at_exit) {
                record__synthesize(rec, false);

                /*
                 * In 'perf record --switch-output' without -a,
                 * record__synthesize() in record__switch_output() won't
                 * generate tracking events because there's no thread_map
                 * in the evlist, so the newly created perf.data would not
                 * contain map and comm information.
                 * Create a fake thread_map and call
                 * perf_event__synthesize_thread_map() directly for those
                 * events.
                 */
                if (target__none(&rec->opts.target))
                        record__synthesize_workload(rec, false);
        }
        return fd;
}

static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
                                        siginfo_t *info,
                                        void *ucontext __maybe_unused)
{
        workload_exec_errno = info->si_value.sival_int;
        done = 1;
        child_finished = 1;
}

static void snapshot_sig_handler(int sig);
static void alarm_sig_handler(int sig);

int __weak
perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
                            struct perf_tool *tool __maybe_unused,
                            perf_event__handler_t process __maybe_unused,
                            struct machine *machine __maybe_unused)
{
        return 0;
}

static const struct perf_event_mmap_page *
perf_evlist__pick_pc(struct perf_evlist *evlist)
{
        if (evlist) {
                if (evlist->mmap && evlist->mmap[0].base)
                        return evlist->mmap[0].base;
                if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].base)
                        return evlist->overwrite_mmap[0].base;
        }
        return NULL;
}

static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
{
        const struct perf_event_mmap_page *pc;

        pc = perf_evlist__pick_pc(rec->evlist);
        if (pc)
                return pc;
        return NULL;
}

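/*
 * Emit the synthesized side-band events (attrs, features, tracing data,
 * time conversion, kernel/module mmaps, thread and cpu maps, ...) that
 * consumers of the perf.data file need in addition to the sampled data.
 */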
static int record__synthesize(struct record *rec, bool tail)
{
        struct perf_session *session = rec->session;
        struct machine *machine = &session->machines.host;
        struct perf_data *data = &rec->data;
        struct record_opts *opts = &rec->opts;
        struct perf_tool *tool = &rec->tool;
        int fd = perf_data__fd(data);
        int err = 0;

        if (rec->opts.tail_synthesize != tail)
                return 0;

        if (data->is_pipe) {
                /*
                 * We need to synthesize events first, because some
                 * features work on top of them (on the report side).
                 */
                err = perf_event__synthesize_attrs(tool, rec->evlist,
                                                   process_synthesized_event);
                if (err < 0) {
                        pr_err("Couldn't synthesize attrs.\n");
                        goto out;
                }

                err = perf_event__synthesize_features(tool, session, rec->evlist,
                                                      process_synthesized_event);
                if (err < 0) {
                        pr_err("Couldn't synthesize features.\n");
                        return err;
                }

                if (have_tracepoints(&rec->evlist->entries)) {
                        /*
                         * FIXME err <= 0 here actually means that
                         * there were no tracepoints so it's not really
                         * an error, just that we don't need to
                         * synthesize anything.  We really have to
                         * return this more properly and also
                         * propagate errors that now are calling die()
                         */
                        err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
                                                                  process_synthesized_event);
                        if (err <= 0) {
                                pr_err("Couldn't record tracing data.\n");
                                goto out;
                        }
                        rec->bytes_written += err;
                }
        }

        err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
                                          process_synthesized_event, machine);
        if (err)
                goto out;

        if (rec->opts.full_auxtrace) {
                err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
                                        session, process_synthesized_event);
                if (err)
                        goto out;
        }

        if (!perf_evlist__exclude_kernel(rec->evlist)) {
                err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
                                                         machine);
                WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
                           "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
                           "Check /proc/kallsyms permission or run as root.\n");

                err = perf_event__synthesize_modules(tool, process_synthesized_event,
                                                     machine);
                WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
                           "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
                           "Check /proc/modules permission or run as root.\n");
        }

        if (perf_guest) {
                machines__process_guests(&session->machines,
                                         perf_event__synthesize_guest_os, tool);
        }

        err = perf_event__synthesize_extra_attr(&rec->tool,
                                                rec->evlist,
                                                process_synthesized_event,
                                                data->is_pipe);
        if (err)
                goto out;

        err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->threads,
                                                 process_synthesized_event,
                                                 NULL);
        if (err < 0) {
                pr_err("Couldn't synthesize thread map.\n");
                return err;
        }

        err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->cpus,
                                             process_synthesized_event, NULL);
        if (err < 0) {
                pr_err("Couldn't synthesize cpu map.\n");
                return err;
        }

        err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
                                            process_synthesized_event, opts->sample_address,
                                            opts->proc_map_timeout, 1);
out:
        return err;
}

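/*
 * The main record loop: set up the session and events, start the workload
 * if one was given, then keep flushing the ring buffers until the workload
 * exits or the user interrupts the record session.
 */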
static int __cmd_record(struct record *rec, int argc, const char **argv)
{
        int err;
        int status = 0;
        unsigned long waking = 0;
        const bool forks = argc > 0;
        struct perf_tool *tool = &rec->tool;
        struct record_opts *opts = &rec->opts;
        struct perf_data *data = &rec->data;
        struct perf_session *session;
        bool disabled = false, draining = false;
        int fd;

        atexit(record__sig_exit);
        signal(SIGCHLD, sig_handler);
        signal(SIGINT, sig_handler);
        signal(SIGTERM, sig_handler);
        signal(SIGSEGV, sigsegv_handler);

        if (rec->opts.record_namespaces)
                tool->namespace_events = true;

        if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
                signal(SIGUSR2, snapshot_sig_handler);
                if (rec->opts.auxtrace_snapshot_mode)
                        trigger_on(&auxtrace_snapshot_trigger);
                if (rec->switch_output.enabled)
                        trigger_on(&switch_output_trigger);
        } else {
                signal(SIGUSR2, SIG_IGN);
        }

        session = perf_session__new(data, false, tool);
        if (session == NULL) {
                pr_err("Perf session creation failed.\n");
                return -1;
        }

        fd = perf_data__fd(data);
        rec->session = session;

        record__init_features(rec);

        if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
                session->header.env.clockid_res_ns = rec->opts.clockid_res_ns;

        if (forks) {
                err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
                                                    argv, data->is_pipe,
                                                    workload_exec_failed_signal);
                if (err < 0) {
                        pr_err("Couldn't run the workload!\n");
                        status = err;
                        goto out_delete_session;
                }
        }

        /*
         * If we have just a single event and are sending data
         * through a pipe, we need to force the id allocation,
         * because we synthesize the event name through the pipe
         * and need the id for that.
         */
        if (data->is_pipe && rec->evlist->nr_entries == 1)
                rec->opts.sample_id = true;

        if (record__open(rec) != 0) {
                err = -1;
                goto out_child;
        }

        err = bpf__apply_obj_config();
        if (err) {
                char errbuf[BUFSIZ];

                bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
                pr_err("ERROR: Apply config to BPF failed: %s\n",
                         errbuf);
                goto out_child;
        }

        /*
         * Normally perf_session__new would do this, but it doesn't have the
         * evlist.
         */
        if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
                pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
                rec->tool.ordered_events = false;
        }

        if (!rec->evlist->nr_groups)
                perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

        if (data->is_pipe) {
                err = perf_header__write_pipe(fd);
                if (err < 0)
                        goto out_child;
        } else {
                err = perf_session__write_header(session, rec->evlist, fd, false);
                if (err < 0)
                        goto out_child;
        }

        if (!rec->no_buildid
            && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
                pr_err("Couldn't generate buildids. "
                       "Use --no-buildid to profile anyway.\n");
                err = -1;
                goto out_child;
        }

        err = record__synthesize(rec, false);
        if (err < 0)
                goto out_child;

        if (rec->realtime_prio) {
                struct sched_param param;

                param.sched_priority = rec->realtime_prio;
                if (sched_setscheduler(0, SCHED_FIFO, &param)) {
                        pr_err("Could not set realtime priority.\n");
                        err = -1;
                        goto out_child;
                }
        }

        /*
         * When perf is starting the traced process, all the events
         * (apart from group members) have enable_on_exec=1 set,
         * so don't spoil it by prematurely enabling them.
         */
        if (!target__none(&opts->target) && !opts->initial_delay)
                perf_evlist__enable(rec->evlist);

        /*
         * Let the child rip
         */
        if (forks) {
                struct machine *machine = &session->machines.host;
                union perf_event *event;
                pid_t tgid;

                event = malloc(sizeof(event->comm) + machine->id_hdr_size);
                if (event == NULL) {
                        err = -ENOMEM;
                        goto out_child;
                }

                /*
                 * Some H/W events are generated before COMM event
                 * which is emitted during exec(), so perf script
                 * cannot see a correct process name for those events.
                 * Synthesize COMM event to prevent it.
                 */
                tgid = perf_event__synthesize_comm(tool, event,
                                                   rec->evlist->workload.pid,
                                                   process_synthesized_event,
                                                   machine);
                free(event);

                if (tgid == -1)
                        goto out_child;

                event = malloc(sizeof(event->namespaces) +
                               (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
                               machine->id_hdr_size);
                if (event == NULL) {
                        err = -ENOMEM;
                        goto out_child;
                }

                /*
                 * Synthesize NAMESPACES event for the command specified.
                 */
                perf_event__synthesize_namespaces(tool, event,
                                                  rec->evlist->workload.pid,
                                                  tgid, process_synthesized_event,
                                                  machine);
                free(event);

                perf_evlist__start_workload(rec->evlist);
        }

        if (opts->initial_delay) {
                usleep(opts->initial_delay * USEC_PER_MSEC);
                perf_evlist__enable(rec->evlist);
        }

        trigger_ready(&auxtrace_snapshot_trigger);
        trigger_ready(&switch_output_trigger);
        perf_hooks__invoke_record_start();
        for (;;) {
                unsigned long long hits = rec->samples;

                /*
                 * rec->evlist->bkw_mmap_state may be BKW_MMAP_EMPTY here:
                 * when done == true and hits != rec->samples in the
                 * previous round.
                 *
                 * perf_evlist__toggle_bkw_mmap() ensures we never convert
                 * BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
                 */
                if (trigger_is_hit(&switch_output_trigger) || done || draining)
                        perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);

                if (record__mmap_read_all(rec) < 0) {
                        trigger_error(&auxtrace_snapshot_trigger);
                        trigger_error(&switch_output_trigger);
                        err = -1;
                        goto out_child;
                }

                if (auxtrace_record__snapshot_started) {
                        auxtrace_record__snapshot_started = 0;
                        if (!trigger_is_error(&auxtrace_snapshot_trigger))
                                record__read_auxtrace_snapshot(rec);
                        if (trigger_is_error(&auxtrace_snapshot_trigger)) {
                                pr_err("AUX area tracing snapshot failed\n");
                                err = -1;
                                goto out_child;
                        }
                }

                if (trigger_is_hit(&switch_output_trigger)) {
                        /*
                         * If switch_output_trigger is hit, the data in the
                         * overwritable ring buffer should have been collected,
                         * so bkw_mmap_state should be set to BKW_MMAP_EMPTY.
                         *
                         * If SIGUSR2 is raised after or during record__mmap_read_all(),
                         * record__mmap_read_all() didn't collect data from the
                         * overwritable ring buffer. Read again.
                         */
1298 if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
1299 continue;
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001300 trigger_ready(&switch_output_trigger);
1301
Wang Nan057374642016-07-14 08:34:43 +00001302 /*
1303			 * Re-enable events in the overwrite ring buffer after
1304			 * record__mmap_read_all(): we should have collected
1305			 * data from it.
1306 */
1307 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);
1308
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001309 if (!quiet)
1310 fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
1311 waking);
1312 waking = 0;
1313 fd = record__switch_output(rec, false);
1314 if (fd < 0) {
1315 pr_err("Failed to switch to new file\n");
1316 trigger_error(&switch_output_trigger);
1317 err = fd;
1318 goto out_child;
1319 }
Jiri Olsabfacbe32017-01-09 10:52:00 +01001320
1321 /* re-arm the alarm */
1322 if (rec->switch_output.time)
1323 alarm(rec->switch_output.time);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001324 }
1325
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001326 if (hits == rec->samples) {
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001327 if (done || draining)
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001328 break;
Arnaldo Carvalho de Melof66a8892014-08-18 17:25:59 -03001329 err = perf_evlist__poll(rec->evlist, -1);
Jiri Olsaa5151142014-06-02 13:44:23 -04001330 /*
1331			 * Propagate the error only if there is one. Ignore a positive
1332			 * number of returned events and an interrupt error (EINTR).
1333 */
1334 if (err > 0 || (err < 0 && errno == EINTR))
Namhyung Kim45604712014-05-12 09:47:24 +09001335 err = 0;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001336 waking++;
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001337
1338 if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
1339 draining = true;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001340 }
1341
Jiri Olsa774cb492012-11-12 18:34:01 +01001342 /*
1343		 * When perf starts the traced process, the events die with
1344		 * that process at the end and we wait for that, so there is
1345		 * no need to disable the events in this case.
1346 */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001347 if (done && !disabled && !target__none(&opts->target)) {
Wang Nan5f9cf592016-04-20 18:59:49 +00001348 trigger_off(&auxtrace_snapshot_trigger);
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001349 perf_evlist__disable(rec->evlist);
Jiri Olsa27119262012-11-12 18:34:02 +01001350 disabled = true;
1351 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001352 }
Wang Nan5f9cf592016-04-20 18:59:49 +00001353 trigger_off(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001354 trigger_off(&switch_output_trigger);
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001355
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001356 if (forks && workload_exec_errno) {
Masami Hiramatsu35550da2014-08-14 02:22:43 +00001357 char msg[STRERR_BUFSIZE];
Arnaldo Carvalho de Meloc8b5f2c2016-07-06 11:56:20 -03001358 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001359 pr_err("Workload failed: %s\n", emsg);
1360 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001361 goto out_child;
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001362 }
1363
Namhyung Kime3d59112015-01-29 17:06:44 +09001364 if (!quiet)
Namhyung Kim45604712014-05-12 09:47:24 +09001365 fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02001366
Wang Nan4ea648a2016-07-14 08:34:47 +00001367 if (target__none(&rec->opts.target))
1368 record__synthesize_workload(rec, true);
1369
Namhyung Kim45604712014-05-12 09:47:24 +09001370out_child:
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001371 record__aio_mmap_read_sync(rec);
1372
Namhyung Kim45604712014-05-12 09:47:24 +09001373 if (forks) {
1374 int exit_status;
Ingo Molnaraddc2782009-06-02 23:43:11 +02001375
Namhyung Kim45604712014-05-12 09:47:24 +09001376 if (!child_finished)
1377 kill(rec->evlist->workload.pid, SIGTERM);
1378
1379 wait(&exit_status);
1380
1381 if (err < 0)
1382 status = err;
1383 else if (WIFEXITED(exit_status))
1384 status = WEXITSTATUS(exit_status);
1385 else if (WIFSIGNALED(exit_status))
1386 signr = WTERMSIG(exit_status);
1387 } else
1388 status = err;
1389
Wang Nan4ea648a2016-07-14 08:34:47 +00001390 record__synthesize(rec, true);
Namhyung Kime3d59112015-01-29 17:06:44 +09001391 /* this will be recalculated during process_buildids() */
1392 rec->samples = 0;
1393
Wang Nanecfd7a92016-04-13 08:21:07 +00001394 if (!err) {
1395 if (!rec->timestamp_filename) {
1396 record__finish_output(rec);
1397 } else {
1398 fd = record__switch_output(rec, true);
1399 if (fd < 0) {
1400 status = fd;
1401 goto out_delete_session;
1402 }
1403 }
1404 }
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001405
Wang Nana0748652016-11-26 07:03:28 +00001406 perf_hooks__invoke_record_end();
1407
Namhyung Kime3d59112015-01-29 17:06:44 +09001408 if (!err && !quiet) {
1409 char samples[128];
Wang Nanecfd7a92016-04-13 08:21:07 +00001410 const char *postfix = rec->timestamp_filename ?
1411 ".<timestamp>" : "";
Namhyung Kime3d59112015-01-29 17:06:44 +09001412
Adrian Hunteref149c22015-04-09 18:53:45 +03001413 if (rec->samples && !rec->opts.full_auxtrace)
Namhyung Kime3d59112015-01-29 17:06:44 +09001414 scnprintf(samples, sizeof(samples),
1415 " (%" PRIu64 " samples)", rec->samples);
1416 else
1417 samples[0] = '\0';
1418
Wang Nanecfd7a92016-04-13 08:21:07 +00001419 fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s ]\n",
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001420 perf_data__size(data) / 1024.0 / 1024.0,
Jiri Olsaeae8ad82017-01-23 22:25:41 +01001421 data->file.path, postfix, samples);
Namhyung Kime3d59112015-01-29 17:06:44 +09001422 }
1423
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001424out_delete_session:
1425 perf_session__delete(session);
Namhyung Kim45604712014-05-12 09:47:24 +09001426 return status;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001427}
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001428
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001429static void callchain_debug(struct callchain_param *callchain)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001430{
Kan Liangaad2b212015-01-05 13:23:04 -05001431 static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
Jiri Olsaa601fdf2014-02-03 12:44:43 +01001432
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001433 pr_debug("callchain: type %s\n", str[callchain->record_mode]);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001434
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001435 if (callchain->record_mode == CALLCHAIN_DWARF)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001436 pr_debug("callchain: stack dump size %d\n",
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001437 callchain->dump_size);
1438}
1439
1440int record_opts__parse_callchain(struct record_opts *record,
1441 struct callchain_param *callchain,
1442 const char *arg, bool unset)
1443{
1444 int ret;
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001445 callchain->enabled = !unset;
1446
1447 /* --no-call-graph */
1448 if (unset) {
1449 callchain->record_mode = CALLCHAIN_NONE;
1450 pr_debug("callchain: disabled\n");
1451 return 0;
1452 }
1453
1454 ret = parse_callchain_record_opt(arg, callchain);
1455 if (!ret) {
1456 /* Enable data address sampling for DWARF unwind. */
1457 if (callchain->record_mode == CALLCHAIN_DWARF)
1458 record->sample_address = true;
1459 callchain_debug(callchain);
1460 }
1461
1462 return ret;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001463}
1464
Kan Liangc421e802015-07-29 05:42:12 -04001465int record_parse_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001466 const char *arg,
1467 int unset)
1468{
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001469 return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
Jiri Olsa26d33022012-08-07 15:20:47 +02001470}
1471
Kan Liangc421e802015-07-29 05:42:12 -04001472int record_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001473 const char *arg __maybe_unused,
1474 int unset __maybe_unused)
1475{
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001476 struct callchain_param *callchain = opt->value;
Kan Liangc421e802015-07-29 05:42:12 -04001477
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001478 callchain->enabled = true;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001479
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001480 if (callchain->record_mode == CALLCHAIN_NONE)
1481 callchain->record_mode = CALLCHAIN_FP;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001482
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001483 callchain_debug(callchain);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001484 return 0;
1485}
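/*
 * Illustrative command lines (not taken from this file) showing how the
 * callbacks above are reached:
 *
 *   perf record -g ...                    -> record_callchain_opt(),
 *                                            defaults to CALLCHAIN_FP
 *   perf record --call-graph dwarf,8192   -> record_parse_callchain_opt(),
 *                                            DWARF mode with an 8192-byte
 *                                            stack dump, sample_address set
 *   perf record --no-call-graph ...       -> CALLCHAIN_NONE
 */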
1486
Jiri Olsaeb853e82014-02-03 12:44:42 +01001487static int perf_record_config(const char *var, const char *value, void *cb)
1488{
Namhyung Kim7a29c082015-12-15 10:49:56 +09001489 struct record *rec = cb;
1490
1491 if (!strcmp(var, "record.build-id")) {
1492 if (!strcmp(value, "cache"))
1493 rec->no_buildid_cache = false;
1494 else if (!strcmp(value, "no-cache"))
1495 rec->no_buildid_cache = true;
1496 else if (!strcmp(value, "skip"))
1497 rec->no_buildid = true;
1498 else
1499 return -1;
1500 return 0;
1501 }
Yisheng Xiecff17202018-03-12 19:25:57 +08001502 if (!strcmp(var, "record.call-graph")) {
1503 var = "call-graph.record-mode";
1504 return perf_default_config(var, value, cb);
1505 }
Jiri Olsaeb853e82014-02-03 12:44:42 +01001506
Yisheng Xiecff17202018-03-12 19:25:57 +08001507 return 0;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001508}
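/*
 * Illustrative ~/.perfconfig entries handled by perf_record_config():
 *
 *   [record]
 *           build-id = cache | no-cache | skip
 *           call-graph = dwarf      (forwarded as call-graph.record-mode)
 */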
1509
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001510struct clockid_map {
1511 const char *name;
1512 int clockid;
1513};
1514
1515#define CLOCKID_MAP(n, c) \
1516 { .name = n, .clockid = (c), }
1517
1518#define CLOCKID_END { .name = NULL, }
1519
1520
1521/*
1522 * Add the missing ones; we need to build on many distros...
1523 */
1524#ifndef CLOCK_MONOTONIC_RAW
1525#define CLOCK_MONOTONIC_RAW 4
1526#endif
1527#ifndef CLOCK_BOOTTIME
1528#define CLOCK_BOOTTIME 7
1529#endif
1530#ifndef CLOCK_TAI
1531#define CLOCK_TAI 11
1532#endif
1533
1534static const struct clockid_map clockids[] = {
1535 /* available for all events, NMI safe */
1536 CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
1537 CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),
1538
1539 /* available for some events */
1540 CLOCKID_MAP("realtime", CLOCK_REALTIME),
1541 CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
1542 CLOCKID_MAP("tai", CLOCK_TAI),
1543
1544 /* available for the lazy */
1545 CLOCKID_MAP("mono", CLOCK_MONOTONIC),
1546 CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
1547 CLOCKID_MAP("real", CLOCK_REALTIME),
1548 CLOCKID_MAP("boot", CLOCK_BOOTTIME),
1549
1550 CLOCKID_END,
1551};
1552
Alexey Budankovcf790512018-10-09 17:36:24 +03001553static int get_clockid_res(clockid_t clk_id, u64 *res_ns)
1554{
1555 struct timespec res;
1556
1557 *res_ns = 0;
1558 if (!clock_getres(clk_id, &res))
1559 *res_ns = res.tv_nsec + res.tv_sec * NSEC_PER_SEC;
1560 else
1561 pr_warning("WARNING: Failed to determine specified clock resolution.\n");
1562
1563 return 0;
1564}
1565
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001566static int parse_clockid(const struct option *opt, const char *str, int unset)
1567{
1568 struct record_opts *opts = (struct record_opts *)opt->value;
1569 const struct clockid_map *cm;
1570 const char *ostr = str;
1571
1572 if (unset) {
1573 opts->use_clockid = 0;
1574 return 0;
1575 }
1576
1577 /* no arg passed */
1578 if (!str)
1579 return 0;
1580
1581 /* no setting it twice */
1582 if (opts->use_clockid)
1583 return -1;
1584
1585 opts->use_clockid = true;
1586
1587	/* if it's a number, we're done */
1588 if (sscanf(str, "%d", &opts->clockid) == 1)
Alexey Budankovcf790512018-10-09 17:36:24 +03001589 return get_clockid_res(opts->clockid, &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001590
1591 /* allow a "CLOCK_" prefix to the name */
1592 if (!strncasecmp(str, "CLOCK_", 6))
1593 str += 6;
1594
1595 for (cm = clockids; cm->name; cm++) {
1596 if (!strcasecmp(str, cm->name)) {
1597 opts->clockid = cm->clockid;
Alexey Budankovcf790512018-10-09 17:36:24 +03001598 return get_clockid_res(opts->clockid,
1599 &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001600 }
1601 }
1602
1603 opts->use_clockid = false;
1604 ui__warning("unknown clockid %s, check man page\n", ostr);
1605 return -1;
1606}
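/*
 * Illustrative -k/--clockid arguments accepted by parse_clockid():
 *
 *   perf record -k monotonic_raw ...        name from the clockids[] table
 *   perf record -k CLOCK_MONOTONIC_RAW ...  the "CLOCK_" prefix is stripped
 *   perf record -k 4 ...                    a raw numeric clockid
 *
 * In each case the clock resolution is recorded via get_clockid_res().
 */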
1607
Adrian Huntere9db1312015-04-09 18:53:46 +03001608static int record__parse_mmap_pages(const struct option *opt,
1609 const char *str,
1610 int unset __maybe_unused)
1611{
1612 struct record_opts *opts = opt->value;
1613 char *s, *p;
1614 unsigned int mmap_pages;
1615 int ret;
1616
1617 if (!str)
1618 return -EINVAL;
1619
1620 s = strdup(str);
1621 if (!s)
1622 return -ENOMEM;
1623
1624 p = strchr(s, ',');
1625 if (p)
1626 *p = '\0';
1627
1628 if (*s) {
1629 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
1630 if (ret)
1631 goto out_free;
1632 opts->mmap_pages = mmap_pages;
1633 }
1634
1635 if (!p) {
1636 ret = 0;
1637 goto out_free;
1638 }
1639
1640 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
1641 if (ret)
1642 goto out_free;
1643
1644 opts->auxtrace_mmap_pages = mmap_pages;
1645
1646out_free:
1647 free(s);
1648 return ret;
1649}
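/*
 * Illustrative -m/--mmap-pages arguments parsed above:
 *
 *   perf record -m 512 ...      512 data mmap pages
 *   perf record -m 512,128 ...  512 data pages plus 128 AUX area pages
 *
 * Validation of each part is delegated to __perf_evlist__parse_mmap_pages().
 */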
1650
Jiri Olsa0c582442017-01-09 10:51:59 +01001651static void switch_output_size_warn(struct record *rec)
1652{
1653 u64 wakeup_size = perf_evlist__mmap_size(rec->opts.mmap_pages);
1654 struct switch_output *s = &rec->switch_output;
1655
1656 wakeup_size /= 2;
1657
1658 if (s->size < wakeup_size) {
1659 char buf[100];
1660
1661 unit_number__scnprintf(buf, sizeof(buf), wakeup_size);
1662		pr_warning("WARNING: switch-output data size is lower than the "
1663			   "wakeup kernel buffer size (%s), "
1664			   "expect bigger perf.data sizes\n", buf);
1665 }
1666}
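/*
 * Example of the check above (assuming a 1 MiB data mmap): the wakeup
 * threshold is 512 KiB, so e.g. --switch-output=100K would trigger the
 * warning.
 */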
1667
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001668static int switch_output_setup(struct record *rec)
1669{
1670 struct switch_output *s = &rec->switch_output;
Jiri Olsadc0c6122017-01-09 10:51:58 +01001671 static struct parse_tag tags_size[] = {
1672 { .tag = 'B', .mult = 1 },
1673 { .tag = 'K', .mult = 1 << 10 },
1674 { .tag = 'M', .mult = 1 << 20 },
1675 { .tag = 'G', .mult = 1 << 30 },
1676 { .tag = 0 },
1677 };
Jiri Olsabfacbe32017-01-09 10:52:00 +01001678 static struct parse_tag tags_time[] = {
1679 { .tag = 's', .mult = 1 },
1680 { .tag = 'm', .mult = 60 },
1681 { .tag = 'h', .mult = 60*60 },
1682 { .tag = 'd', .mult = 60*60*24 },
1683 { .tag = 0 },
1684 };
Jiri Olsadc0c6122017-01-09 10:51:58 +01001685 unsigned long val;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001686
1687 if (!s->set)
1688 return 0;
1689
1690 if (!strcmp(s->str, "signal")) {
1691 s->signal = true;
1692 pr_debug("switch-output with SIGUSR2 signal\n");
Jiri Olsadc0c6122017-01-09 10:51:58 +01001693 goto enabled;
1694 }
1695
1696 val = parse_tag_value(s->str, tags_size);
1697 if (val != (unsigned long) -1) {
1698 s->size = val;
1699 pr_debug("switch-output with %s size threshold\n", s->str);
1700 goto enabled;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001701 }
1702
Jiri Olsabfacbe32017-01-09 10:52:00 +01001703 val = parse_tag_value(s->str, tags_time);
1704 if (val != (unsigned long) -1) {
1705 s->time = val;
1706 pr_debug("switch-output with %s time threshold (%lu seconds)\n",
1707 s->str, s->time);
1708 goto enabled;
1709 }
1710
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001711 return -1;
Jiri Olsadc0c6122017-01-09 10:51:58 +01001712
1713enabled:
1714 rec->timestamp_filename = true;
1715 s->enabled = true;
Jiri Olsa0c582442017-01-09 10:51:59 +01001716
1717 if (s->size && !rec->opts.no_buffering)
1718 switch_output_size_warn(rec);
1719
Jiri Olsadc0c6122017-01-09 10:51:58 +01001720 return 0;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001721}
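/*
 * Illustrative --switch-output arguments handled by switch_output_setup():
 *
 *   --switch-output[=signal]   rotate on SIGUSR2
 *   --switch-output=100M       rotate on a size threshold (tags_size)
 *   --switch-output=30s        rotate on a time threshold (tags_time)
 *
 * Any of these also enables timestamped output file names.
 */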
1722
Namhyung Kime5b2c202014-10-23 00:15:46 +09001723static const char * const __record_usage[] = {
Mike Galbraith9e0967532009-05-28 16:25:34 +02001724 "perf record [<options>] [<command>]",
1725 "perf record [<options>] -- <command> [<options>]",
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001726 NULL
1727};
Namhyung Kime5b2c202014-10-23 00:15:46 +09001728const char * const *record_usage = __record_usage;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001729
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001730/*
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001731 * XXX Ideally this would be local to cmd_record() and passed to a record__new
1732 * because we need to have access to it in record__exit, which is called
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001733 * after cmd_record() exits, but since record_options needs to be accessible to
1734 * builtin-script, leave it here.
1735 *
1736 * At least we don't touch it in all the other functions here directly.
1737 *
1738 * Just say no to tons of global variables, sigh.
1739 */
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001740static struct record record = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001741 .opts = {
Andi Kleen8affc2b2014-07-31 14:45:04 +08001742 .sample_time = true,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001743 .mmap_pages = UINT_MAX,
1744 .user_freq = UINT_MAX,
1745 .user_interval = ULLONG_MAX,
Arnaldo Carvalho de Melo447a6012012-05-22 13:14:18 -03001746 .freq = 4000,
Namhyung Kimd1cb9fc2012-05-16 18:45:49 +09001747 .target = {
1748 .uses_mmap = true,
Adrian Hunter3aa59392013-11-15 15:52:29 +02001749 .default_per_cpu = true,
Namhyung Kimd1cb9fc2012-05-16 18:45:49 +09001750 },
Kan Liang9d9cad72015-06-17 09:51:11 -04001751 .proc_map_timeout = 500,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001752 },
Namhyung Kime3d59112015-01-29 17:06:44 +09001753 .tool = {
1754 .sample = process_sample_event,
1755 .fork = perf_event__process_fork,
Adrian Huntercca84822015-08-19 17:29:21 +03001756 .exit = perf_event__process_exit,
Namhyung Kime3d59112015-01-29 17:06:44 +09001757 .comm = perf_event__process_comm,
Hari Bathinif3b36142017-03-08 02:11:43 +05301758 .namespaces = perf_event__process_namespaces,
Namhyung Kime3d59112015-01-29 17:06:44 +09001759 .mmap = perf_event__process_mmap,
1760 .mmap2 = perf_event__process_mmap2,
Adrian Huntercca84822015-08-19 17:29:21 +03001761 .ordered_events = true,
Namhyung Kime3d59112015-01-29 17:06:44 +09001762 },
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001763};
Frederic Weisbecker7865e812010-04-14 19:42:07 +02001764
Namhyung Kim76a26542015-10-22 23:28:32 +09001765const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
1766 "\n\t\t\t\tDefault: fp";
Arnaldo Carvalho de Melo61eaa3b2012-10-01 15:20:58 -03001767
Wang Nan0aab2132016-06-16 08:02:41 +00001768static bool dry_run;
1769
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001770/*
1771 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
1772 * with it and switch to using the library functions in perf_evlist that came
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03001773 * from builtin-record.c, i.e. use record_opts,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001774 * perf_evlist__prepare_workload, etc. instead of fork+exec'ing 'perf record',
1775 * using pipes, etc.
1776 */
Jiri Olsaefd21302017-01-03 09:19:55 +01001777static struct option __record_options[] = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001778 OPT_CALLBACK('e', "event", &record.evlist, "event",
Thomas Gleixner86847b62009-06-06 12:24:17 +02001779 "event selector. use 'perf list' to list available events",
Jiri Olsaf120f9d2011-07-14 11:25:32 +02001780 parse_events_option),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001781 OPT_CALLBACK(0, "filter", &record.evlist, "filter",
Li Zefanc171b552009-10-15 11:22:07 +08001782 "event filter", parse_filter),
Wang Nan4ba1faa2015-07-10 07:36:10 +00001783 OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
1784 NULL, "don't record events from perf itself",
1785 exclude_perf),
Namhyung Kimbea03402012-04-26 14:15:15 +09001786 OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03001787 "record events on existing process id"),
Namhyung Kimbea03402012-04-26 14:15:15 +09001788 OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03001789 "record events on existing thread id"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001790 OPT_INTEGER('r', "realtime", &record.realtime_prio,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001791 "collect data with this RT SCHED_FIFO priority"),
Arnaldo Carvalho de Melo509051e2014-01-14 17:52:14 -03001792 OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
Kirill Smelkovacac03f2011-01-12 17:59:36 +03001793 "collect data without buffering"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001794 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
Frederic Weisbeckerdaac07b2009-08-13 10:27:19 +02001795 "collect raw sample records from all opened counters"),
Namhyung Kimbea03402012-04-26 14:15:15 +09001796 OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001797 "system-wide collection from all CPUs"),
Namhyung Kimbea03402012-04-26 14:15:15 +09001798 OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
Stephane Eranianc45c6ea2010-05-28 12:00:01 +02001799 "list of cpus to monitor"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001800 OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
Jiri Olsaeae8ad82017-01-23 22:25:41 +01001801 OPT_STRING('o', "output", &record.data.file.path, "file",
Ingo Molnarabaff322009-06-02 22:59:57 +02001802 "output file name"),
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02001803 OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
1804 &record.opts.no_inherit_set,
1805 "child tasks do not inherit counters"),
Wang Nan4ea648a2016-07-14 08:34:47 +00001806 OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
1807 "synthesize non-sample events at the end of output"),
Wang Nan626a6b72016-07-14 08:34:45 +00001808 OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
Arnaldo Carvalho de Melob09c2362018-03-01 14:52:50 -03001809 OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
1810 "Fail if the specified frequency can't be used"),
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03001811 OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
1812 "profile at this frequency",
1813 record__parse_freq),
Adrian Huntere9db1312015-04-09 18:53:46 +03001814 OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
1815 "number of mmap data pages and AUX area tracing mmap pages",
1816 record__parse_mmap_pages),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001817 OPT_BOOLEAN(0, "group", &record.opts.group,
Lin Ming43bece72011-08-17 18:42:07 +08001818 "put the counters into a counter group"),
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001819 OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001820 NULL, "enables call-graph recording" ,
1821 &record_callchain_opt),
1822 OPT_CALLBACK(0, "call-graph", &record.opts,
Namhyung Kim76a26542015-10-22 23:28:32 +09001823 "record_mode[,record_size]", record_callchain_help,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001824 &record_parse_callchain_opt),
Ian Munsiec0555642010-04-13 18:37:33 +10001825 OPT_INCR('v', "verbose", &verbose,
Ingo Molnar3da297a2009-06-07 17:39:02 +02001826 "be more verbose (show counter open errors, etc)"),
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02001827 OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001828 OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001829 "per thread counts"),
Peter Zijlstra56100322015-06-10 16:48:50 +02001830 OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
Kan Liang3b0a5da2017-08-29 13:11:08 -04001831 OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
1832 "Record the sample physical addresses"),
Jiri Olsab6f35ed2016-08-01 20:02:35 +02001833 OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
Adrian Hunter3abebc52015-07-06 14:51:01 +03001834 OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
1835 &record.opts.sample_time_set,
1836 "Record the sample timestamps"),
Jiri Olsaf290aa12018-02-01 09:38:11 +01001837 OPT_BOOLEAN_SET('P', "period", &record.opts.period, &record.opts.period_set,
1838 "Record the sample period"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001839 OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001840 "don't sample"),
Wang Nand2db9a92016-01-25 09:56:19 +00001841 OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
1842 &record.no_buildid_cache_set,
1843 "do not update the buildid cache"),
1844 OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
1845 &record.no_buildid_set,
1846 "do not collect buildids in perf.data"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001847 OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
Stephane Eranian023695d2011-02-14 11:20:01 +02001848 "monitor event in cgroup name only",
1849 parse_cgroups),
Arnaldo Carvalho de Meloa6205a32014-01-14 17:58:12 -03001850 OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
Andi Kleen6619a532014-01-11 13:38:27 -08001851 "ms to wait before starting measurement after program start"),
Namhyung Kimbea03402012-04-26 14:15:15 +09001852 OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
1853 "user to profile"),
Stephane Eraniana5aabda2012-03-08 23:47:45 +01001854
1855 OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
1856 "branch any", "sample any taken branches",
1857 parse_branch_stack),
1858
1859 OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
1860 "branch filter mask", "branch stack filter modes",
Roberto Agostino Vitillobdfebd82012-02-09 23:21:02 +01001861 parse_branch_stack),
Andi Kleen05484292013-01-24 16:10:29 +01001862 OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
1863 "sample by weight (on special events only)"),
Andi Kleen475eeab2013-09-20 07:40:43 -07001864 OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
1865 "sample transaction flags (special events only)"),
Adrian Hunter3aa59392013-11-15 15:52:29 +02001866 OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
1867 "use per-thread mmaps"),
Stephane Eranianbcc84ec2015-08-31 18:41:12 +02001868 OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
1869 "sample selected machine registers on interrupt,"
1870 " use -I ? to list register names", parse_regs),
Andi Kleen84c41742017-09-05 10:00:28 -07001871 OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
1872 "sample selected machine registers on interrupt,"
1873 " use -I ? to list register names", parse_regs),
Andi Kleen85c273d2015-02-24 15:13:40 -08001874 OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
1875 "Record running/enabled time of read (:S) events"),
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001876 OPT_CALLBACK('k', "clockid", &record.opts,
1877 "clockid", "clockid to use for events, see clock_gettime()",
1878 parse_clockid),
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001879 OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
1880 "opts", "AUX area tracing Snapshot Mode", ""),
Kan Liang9d9cad72015-06-17 09:51:11 -04001881 OPT_UINTEGER(0, "proc-map-timeout", &record.opts.proc_map_timeout,
1882 "per thread proc mmap processing timeout in ms"),
Hari Bathinif3b36142017-03-08 02:11:43 +05301883 OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
1884 "Record namespaces events"),
Adrian Hunterb757bb02015-07-21 12:44:04 +03001885 OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
1886 "Record context switch events"),
Jiri Olsa85723882016-02-15 09:34:31 +01001887 OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
1888 "Configure all used events to run in kernel space.",
1889 PARSE_OPT_EXCLUSIVE),
1890 OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
1891 "Configure all used events to run in user space.",
1892 PARSE_OPT_EXCLUSIVE),
Wang Nan71dc23262015-10-14 12:41:19 +00001893 OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
1894 "clang binary to use for compiling BPF scriptlets"),
1895 OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
1896 "options passed to clang when compiling BPF scriptlets"),
He Kuang7efe0e02015-12-14 10:39:23 +00001897 OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
1898 "file", "vmlinux pathname"),
Namhyung Kim61566812016-01-11 22:37:09 +09001899 OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
1900 "Record build-id of all DSOs regardless of hits"),
Wang Nanecfd7a92016-04-13 08:21:07 +00001901 OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
1902 "append timestamp to output filename"),
Jin Yao68588ba2017-12-08 21:13:42 +08001903 OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
1904 "Record timestamp boundary (time of first/last samples)"),
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001905 OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
Jiri Olsabfacbe32017-01-09 10:52:00 +01001906 &record.switch_output.set, "signal,size,time",
1907 "Switch output when receive SIGUSR2 or cross size,time threshold",
Jiri Olsadc0c6122017-01-09 10:51:58 +01001908 "signal"),
Wang Nan0aab2132016-06-16 08:02:41 +00001909 OPT_BOOLEAN(0, "dry-run", &dry_run,
1910 "Parse options then exit"),
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001911#ifdef HAVE_AIO_SUPPORT
1912 OPT_CALLBACK_NOOPT(0, "aio", &record.opts,
1913 NULL, "Enable asynchronous trace writing mode",
1914 record__aio_parse),
1915#endif
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001916 OPT_END()
1917};
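/*
 * A few illustrative invocations built from the options above (not
 * exhaustive):
 *
 *   perf record -a -g -- sleep 5       system-wide, with call graphs
 *   perf record -F 1000 -p 1234        sample PID 1234 at 1000 Hz
 *   perf record -e cycles -o out.data -- ./workload
 */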
1918
Namhyung Kime5b2c202014-10-23 00:15:46 +09001919struct option *record_options = __record_options;
1920
Arnaldo Carvalho de Melob0ad8ea2017-03-27 11:47:20 -03001921int cmd_record(int argc, const char **argv)
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001922{
Adrian Hunteref149c22015-04-09 18:53:45 +03001923 int err;
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001924 struct record *rec = &record;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001925 char errbuf[BUFSIZ];
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001926
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03001927 setlocale(LC_ALL, "");
1928
Wang Nan48e1cab2015-12-14 10:39:22 +00001929#ifndef HAVE_LIBBPF_SUPPORT
1930# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
1931 set_nobuild('\0', "clang-path", true);
1932 set_nobuild('\0', "clang-opt", true);
1933# undef set_nobuild
1934#endif
1935
He Kuang7efe0e02015-12-14 10:39:23 +00001936#ifndef HAVE_BPF_PROLOGUE
1937# if !defined (HAVE_DWARF_SUPPORT)
1938# define REASON "NO_DWARF=1"
1939# elif !defined (HAVE_LIBBPF_SUPPORT)
1940# define REASON "NO_LIBBPF=1"
1941# else
1942# define REASON "this architecture doesn't support BPF prologue"
1943# endif
1944# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
1945 set_nobuild('\0', "vmlinux", true);
1946# undef set_nobuild
1947# undef REASON
1948#endif
1949
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001950 rec->evlist = perf_evlist__new();
1951 if (rec->evlist == NULL)
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -02001952 return -ENOMEM;
1953
Arnaldo Carvalho de Meloecc4c562017-01-24 13:44:10 -03001954 err = perf_config(perf_record_config, rec);
1955 if (err)
1956 return err;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001957
Tom Zanussibca647a2010-11-10 08:11:30 -06001958 argc = parse_options(argc, argv, record_options, record_usage,
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02001959 PARSE_OPT_STOP_AT_NON_OPTION);
Namhyung Kim68ba3232017-02-17 17:17:42 +09001960 if (quiet)
1961 perf_quiet_option();
Jiri Olsa483635a2017-02-17 18:00:18 +01001962
1963 /* Make system wide (-a) the default target. */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001964 if (!argc && target__none(&rec->opts.target))
Jiri Olsa483635a2017-02-17 18:00:18 +01001965 rec->opts.target.system_wide = true;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001966
Namhyung Kimbea03402012-04-26 14:15:15 +09001967 if (nr_cgroups && !rec->opts.target.system_wide) {
Namhyung Kimc7118362015-10-25 00:49:27 +09001968 usage_with_options_msg(record_usage, record_options,
1969 "cgroup monitoring only available in system-wide mode");
1970
Stephane Eranian023695d2011-02-14 11:20:01 +02001971 }
Adrian Hunterb757bb02015-07-21 12:44:04 +03001972 if (rec->opts.record_switch_events &&
1973 !perf_can_record_switch_events()) {
Namhyung Kimc7118362015-10-25 00:49:27 +09001974 ui__error("kernel does not support recording context switch events\n");
1975 parse_options_usage(record_usage, record_options, "switch-events", 0);
1976 return -EINVAL;
Adrian Hunterb757bb02015-07-21 12:44:04 +03001977 }
Stephane Eranian023695d2011-02-14 11:20:01 +02001978
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001979 if (switch_output_setup(rec)) {
1980 parse_options_usage(record_usage, record_options, "switch-output", 0);
1981 return -EINVAL;
1982 }
1983
Jiri Olsabfacbe32017-01-09 10:52:00 +01001984 if (rec->switch_output.time) {
1985 signal(SIGALRM, alarm_sig_handler);
1986 alarm(rec->switch_output.time);
1987 }
1988
Adrian Hunter1b36c032016-09-23 17:38:39 +03001989 /*
1990 * Allow aliases to facilitate the lookup of symbols for address
1991 * filters. Refer to auxtrace_parse_filters().
1992 */
1993 symbol_conf.allow_aliases = true;
1994
1995 symbol__init(NULL);
1996
Adrian Hunter4b5ea3b2018-03-06 11:13:12 +02001997 err = record__auxtrace_init(rec);
Adrian Hunter1b36c032016-09-23 17:38:39 +03001998 if (err)
1999 goto out;
2000
Wang Nan0aab2132016-06-16 08:02:41 +00002001 if (dry_run)
Adrian Hunter5c01ad602016-09-23 17:38:37 +03002002 goto out;
Wang Nan0aab2132016-06-16 08:02:41 +00002003
Wang Nand7888572016-04-08 15:07:24 +00002004 err = bpf__setup_stdout(rec->evlist);
2005 if (err) {
2006 bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
2007 pr_err("ERROR: Setup BPF stdout failed: %s\n",
2008 errbuf);
Adrian Hunter5c01ad602016-09-23 17:38:37 +03002009 goto out;
Wang Nand7888572016-04-08 15:07:24 +00002010 }
2011
Adrian Hunteref149c22015-04-09 18:53:45 +03002012 err = -ENOMEM;
2013
Arnaldo Carvalho de Melo6c443952017-11-14 11:03:19 -03002014 if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(rec->evlist))
Arnaldo Carvalho de Melo646aaea2011-05-27 11:00:41 -03002015 pr_warning(
2016"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
2017"check /proc/sys/kernel/kptr_restrict.\n\n"
2018"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
2019"file is not found in the buildid cache or in the vmlinux path.\n\n"
2020"Samples in kernel modules won't be resolved at all.\n\n"
2021"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
2022"even with a suitable vmlinux or kallsyms file.\n\n");
Arnaldo Carvalho de Meloec80fde2011-05-26 09:53:51 -03002023
Wang Nan0c1d46a2016-04-20 18:59:52 +00002024 if (rec->no_buildid_cache || rec->no_buildid) {
Stephane Eraniana1ac1d32010-06-17 11:39:01 +02002025 disable_buildid_cache();
Jiri Olsadc0c6122017-01-09 10:51:58 +01002026 } else if (rec->switch_output.enabled) {
Wang Nan0c1d46a2016-04-20 18:59:52 +00002027 /*
2028 * In 'perf record --switch-output', disable buildid
2029 * generation by default to reduce data file switching
2030		 * overhead. Still generate buildids if they are explicitly
2031		 * required using
2032 *
Jiri Olsa60437ac2017-01-03 09:19:56 +01002033 * perf record --switch-output --no-no-buildid \
Wang Nan0c1d46a2016-04-20 18:59:52 +00002034 * --no-no-buildid-cache
2035 *
2036 * Following code equals to:
2037 *
2038 * if ((rec->no_buildid || !rec->no_buildid_set) &&
2039 * (rec->no_buildid_cache || !rec->no_buildid_cache_set))
2040 * disable_buildid_cache();
2041 */
2042 bool disable = true;
2043
2044 if (rec->no_buildid_set && !rec->no_buildid)
2045 disable = false;
2046 if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
2047 disable = false;
2048 if (disable) {
2049 rec->no_buildid = true;
2050 rec->no_buildid_cache = true;
2051 disable_buildid_cache();
2052 }
2053 }
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02002054
Wang Nan4ea648a2016-07-14 08:34:47 +00002055 if (record.opts.overwrite)
2056 record.opts.tail_synthesize = true;
2057
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03002058 if (rec->evlist->nr_entries == 0 &&
Arnaldo Carvalho de Melo4b4cd502017-07-03 13:26:32 -03002059 __perf_evlist__add_default(rec->evlist, !record.opts.no_samples) < 0) {
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02002060 pr_err("Not enough memory for event selector list\n");
Adrian Hunter394c01e2016-09-23 17:38:36 +03002061 goto out;
Peter Zijlstrabbd36e52009-06-11 23:11:50 +02002062 }
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002063
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02002064 if (rec->opts.target.tid && !rec->opts.no_inherit_set)
2065 rec->opts.no_inherit = true;
2066
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002067 err = target__validate(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002068 if (err) {
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002069 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Jiri Olsac3dec272018-02-06 19:17:58 +01002070 ui__warning("%s\n", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002071 }
Namhyung Kim4bd0f2d2012-04-26 14:15:18 +09002072
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002073 err = target__parse_uid(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002074 if (err) {
2075 int saved_errno = errno;
2076
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002077 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Namhyung Kim3780f482012-05-29 13:22:57 +09002078 ui__error("%s", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002079
2080 err = -saved_errno;
Adrian Hunter394c01e2016-09-23 17:38:36 +03002081 goto out;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002082 }
Arnaldo Carvalho de Melo0d37aa32012-01-19 14:08:15 -02002083
Mengting Zhangca800062017-12-13 15:01:53 +08002084 /* Enable ignoring missing threads when -u/-p option is defined. */
2085 rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;
Jiri Olsa23dc4f12016-12-12 11:35:43 +01002086
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002087 err = -ENOMEM;
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03002088 if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
Arnaldo Carvalho de Melodd7927f2011-01-12 14:28:51 -02002089 usage_with_options(record_usage, record_options);
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02002090
Adrian Hunteref149c22015-04-09 18:53:45 +03002091 err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
2092 if (err)
Adrian Hunter394c01e2016-09-23 17:38:36 +03002093 goto out;
Adrian Hunteref149c22015-04-09 18:53:45 +03002094
Namhyung Kim61566812016-01-11 22:37:09 +09002095 /*
2096	 * We take all buildids when the file contains AUX area
2097	 * tracing data, because we do not decode the trace;
2098	 * decoding it would take too long.
2099 */
2100 if (rec->opts.full_auxtrace)
2101 rec->buildid_all = true;
2102
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03002103 if (record_opts__config(&rec->opts)) {
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03002104 err = -EINVAL;
Adrian Hunter394c01e2016-09-23 17:38:36 +03002105 goto out;
Mike Galbraith7e4ff9e2009-10-12 07:56:03 +02002106 }
2107
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002108 if (verbose > 0)
2109 pr_info("nr_cblocks: %d\n", rec->opts.nr_cblocks);
2110
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002111 err = __cmd_record(&record, argc, argv);
Adrian Hunter394c01e2016-09-23 17:38:36 +03002112out:
Namhyung Kim45604712014-05-12 09:47:24 +09002113 perf_evlist__delete(rec->evlist);
Arnaldo Carvalho de Melod65a4582010-07-30 18:31:28 -03002114 symbol__exit();
Adrian Hunteref149c22015-04-09 18:53:45 +03002115 auxtrace_record__free(rec->itr);
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03002116 return err;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002117}
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002118
2119static void snapshot_sig_handler(int sig __maybe_unused)
2120{
Jiri Olsadc0c6122017-01-09 10:51:58 +01002121 struct record *rec = &record;
2122
Wang Nan5f9cf592016-04-20 18:59:49 +00002123 if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
2124 trigger_hit(&auxtrace_snapshot_trigger);
2125 auxtrace_record__snapshot_started = 1;
2126 if (auxtrace_record__snapshot_start(record.itr))
2127 trigger_error(&auxtrace_snapshot_trigger);
2128 }
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002129
Jiri Olsadc0c6122017-01-09 10:51:58 +01002130 if (switch_output_signal(rec))
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002131 trigger_hit(&switch_output_trigger);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002132}
Jiri Olsabfacbe32017-01-09 10:52:00 +01002133
2134static void alarm_sig_handler(int sig __maybe_unused)
2135{
2136 struct record *rec = &record;
2137
2138 if (switch_output_time(rec))
2139 trigger_hit(&switch_output_trigger);
2140}
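/*
 * The SIGALRM handler above only marks the switch-output trigger; the
 * actual file rotation happens in the __cmd_record() main loop, which
 * also re-arms the alarm when rec->switch_output.time is set.
 */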