blob: 45a80b3584ad74863f8ec9e0e8d0b7e33be54388 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Ingo Molnarabaff322009-06-02 22:59:57 +02002/*
Ingo Molnarbf9e1872009-06-02 23:37:05 +02003 * builtin-record.c
4 *
5 * Builtin record command: Record the profile of a workload
6 * (or a CPU, or a PID) into the perf.data output file - for
7 * later analysis via perf report.
Ingo Molnarabaff322009-06-02 22:59:57 +02008 */
Ingo Molnar16f762a2009-05-27 09:10:38 +02009#include "builtin.h"
Ingo Molnarbf9e1872009-06-02 23:37:05 +020010
11#include "perf.h"
12
Arnaldo Carvalho de Melo6122e4e2010-02-03 16:52:05 -020013#include "util/build-id.h"
Thomas Gleixner6eda5832009-05-01 18:29:57 +020014#include "util/util.h"
Josh Poimboeuf4b6ab942015-12-15 09:39:39 -060015#include <subcmd/parse-options.h>
Ingo Molnar8ad8db32009-05-26 11:10:09 +020016#include "util/parse-events.h"
Taeung Song41840d22016-06-23 17:55:17 +090017#include "util/config.h"
Thomas Gleixner6eda5832009-05-01 18:29:57 +020018
Arnaldo Carvalho de Melo8f651ea2014-10-09 16:12:24 -030019#include "util/callchain.h"
Arnaldo Carvalho de Melof14d5702014-10-17 12:17:40 -030020#include "util/cgroup.h"
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020021#include "util/header.h"
Frederic Weisbecker66e274f2009-08-12 11:07:25 +020022#include "util/event.h"
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -020023#include "util/evlist.h"
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -020024#include "util/evsel.h"
Frederic Weisbecker8f288272009-08-16 22:05:48 +020025#include "util/debug.h"
Arnaldo Carvalho de Melo94c744b2009-12-11 21:24:02 -020026#include "util/session.h"
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -020027#include "util/tool.h"
Arnaldo Carvalho de Melo8d063672009-11-04 18:50:43 -020028#include "util/symbol.h"
Paul Mackerrasa12b51c2010-03-10 20:36:09 +110029#include "util/cpumap.h"
Arnaldo Carvalho de Melofd782602011-01-18 15:15:24 -020030#include "util/thread_map.h"
Jiri Olsaf5fc14122013-10-15 16:27:32 +020031#include "util/data.h"
Stephane Eranianbcc84ec2015-08-31 18:41:12 +020032#include "util/perf_regs.h"
Adrian Hunteref149c22015-04-09 18:53:45 +030033#include "util/auxtrace.h"
Adrian Hunter46bc29b2016-03-08 10:38:44 +020034#include "util/tsc.h"
Andi Kleenf00898f2015-05-27 10:51:51 -070035#include "util/parse-branch-options.h"
Stephane Eranianbcc84ec2015-08-31 18:41:12 +020036#include "util/parse-regs-options.h"
Wang Nan71dc23262015-10-14 12:41:19 +000037#include "util/llvm-utils.h"
Wang Nan8690a2a2016-02-22 09:10:32 +000038#include "util/bpf-loader.h"
Wang Nan5f9cf592016-04-20 18:59:49 +000039#include "util/trigger.h"
Wang Nana0748652016-11-26 07:03:28 +000040#include "util/perf-hooks.h"
Alexey Budankovf13de662019-01-22 20:50:57 +030041#include "util/cpu-set-sched.h"
Arnaldo Carvalho de Meloc5e40272017-04-19 16:12:39 -030042#include "util/time-utils.h"
Arnaldo Carvalho de Melo58db1d62017-04-19 16:05:56 -030043#include "util/units.h"
Song Liu7b612e22019-01-17 08:15:19 -080044#include "util/bpf-event.h"
Wang Nand8871ea2016-02-26 09:32:06 +000045#include "asm/bug.h"
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020046
Arnaldo Carvalho de Meloa43783a2017-04-18 10:46:11 -030047#include <errno.h>
Arnaldo Carvalho de Melofd20e812017-04-17 15:23:08 -030048#include <inttypes.h>
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -030049#include <locale.h>
Arnaldo Carvalho de Melo42087352017-04-19 19:06:30 -030050#include <poll.h>
Peter Zijlstra97124d5e2009-06-02 15:52:24 +020051#include <unistd.h>
Peter Zijlstrade9ac072009-04-08 15:01:31 +020052#include <sched.h>
Arnaldo Carvalho de Melo9607ad32017-04-19 15:49:18 -030053#include <signal.h>
Arnaldo Carvalho de Meloa41794c2010-05-18 18:29:23 -030054#include <sys/mman.h>
Arnaldo Carvalho de Melo42087352017-04-19 19:06:30 -030055#include <sys/wait.h>
Arnaldo Carvalho de Melo0693e682016-08-08 15:05:46 -030056#include <linux/time64.h>
Bernhard Rosenkraenzer78da39f2012-10-08 09:43:26 +030057
/*
 * State for the --switch-output option: controls if/when the perf.data
 * output file is rotated during a recording session.
 */
struct switch_output {
	bool		 enabled;	/* any rotation mode is active */
	bool		 signal;	/* rotate when the output trigger fires on a signal
					 * (presumably SIGUSR2 — confirm against option setup) */
	unsigned long	 size;		/* rotate once bytes_written reaches this (0 = off) */
	unsigned long	 time;		/* time-based rotation period (0 = off); units set
					 * where the option is parsed — not visible here */
	const char	*str;		/* raw option argument as supplied by the user */
	bool		 set;		/* option appeared on the command line */
	char	       **filenames;	/* NOTE(review): looks like a ring of generated output
					 * names bounded by num_files — verify at use site */
	int		 num_files;	/* capacity of filenames */
	int		 cur_file;	/* current index into filenames */
};
69
/*
 * Top-level state of a 'perf record' session, embedding the generic
 * perf_tool callbacks so container_of() can recover it in handlers.
 */
struct record {
	struct perf_tool	tool;			/* event-processing callbacks; must stay first-class for container_of() */
	struct record_opts	opts;			/* parsed command-line recording options */
	u64			bytes_written;		/* total payload written, drives switch_output_size() */
	struct perf_data	data;			/* output perf.data descriptor */
	struct auxtrace_record	*itr;			/* AUX area (e.g. HW trace) recording context, NULL until init */
	struct perf_evlist	*evlist;		/* events being recorded */
	struct perf_session	*session;		/* session owning header/index state */
	int			realtime_prio;		/* SCHED_FIFO priority requested, 0 = none */
	bool			no_buildid;		/* skip build-id processing */
	bool			no_buildid_set;		/* no_buildid came from the user (vs default) */
	bool			no_buildid_cache;	/* don't populate the build-id cache */
	bool			no_buildid_cache_set;	/* ...and that choice was explicit */
	bool			buildid_all;		/* mark all DSOs, not only those with hits */
	bool			timestamp_filename;	/* append timestamp to output file name */
	bool			timestamp_boundary;	/* record first/last sample times */
	struct switch_output	switch_output;		/* output-rotation configuration */
	unsigned long long	samples;		/* samples seen, for the end-of-run summary */
	cpu_set_t		affinity_mask;		/* last mask applied by record__adjust_affinity() */
};
Ingo Molnara21ca2c2009-06-06 09:58:57 +020090
/* Set from the snapshot signal path; read by the main loop. */
static volatile int auxtrace_record__snapshot_started;
/* Trigger coordinating asynchronous AUX area snapshot requests. */
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
/* Trigger coordinating --switch-output file rotation requests. */
static DEFINE_TRIGGER(switch_output_trigger);

/* Human-readable names for enum values up to PERF_AFFINITY_MAX. */
static const char *affinity_tags[PERF_AFFINITY_MAX] = {
	"SYS", "NODE", "CPU"
};
98
Jiri Olsadc0c6122017-01-09 10:51:58 +010099static bool switch_output_signal(struct record *rec)
100{
101 return rec->switch_output.signal &&
102 trigger_is_ready(&switch_output_trigger);
103}
104
105static bool switch_output_size(struct record *rec)
106{
107 return rec->switch_output.size &&
108 trigger_is_ready(&switch_output_trigger) &&
109 (rec->bytes_written >= rec->switch_output.size);
110}
111
Jiri Olsabfacbe32017-01-09 10:52:00 +0100112static bool switch_output_time(struct record *rec)
113{
114 return rec->switch_output.time &&
115 trigger_is_ready(&switch_output_trigger);
116}
117
/*
 * Synchronously append 'size' bytes at 'bf' to the session output file,
 * accounting them in rec->bytes_written and firing the switch-output
 * trigger once the size threshold is crossed.
 *
 * Returns 0 on success, -1 on write failure.  'map' is unused here; it
 * exists so this matches the signature of the mmap push callbacks.
 */
static int record__write(struct record *rec, struct perf_mmap *map __maybe_unused,
			 void *bf, size_t size)
{
	struct perf_data_file *file = &rec->session->data->file;

	if (perf_data_file__write(file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	rec->bytes_written += size;

	/* Size-based --switch-output: request a file rotation. */
	if (switch_output_size(rec))
		trigger_hit(&switch_output_trigger);

	return 0;
}
135
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300136#ifdef HAVE_AIO_SUPPORT
/*
 * Queue one POSIX AIO write of 'size' bytes from 'buf' to 'trace_fd' at
 * file offset 'off'.  Retries while aio_write() fails with EAGAIN (queue
 * full); any other error abandons the block by marking aio_fildes = -1.
 *
 * Returns aio_write()'s result: 0 if queued, -1 on hard failure.
 */
static int record__aio_write(struct aiocb *cblock, int trace_fd,
			     void *buf, size_t size, off_t off)
{
	int rc;

	cblock->aio_fildes = trace_fd;
	cblock->aio_buf = buf;
	cblock->aio_nbytes = size;
	cblock->aio_offset = off;
	/* No completion notification: completion is polled via aio_error(). */
	cblock->aio_sigevent.sigev_notify = SIGEV_NONE;

	do {
		rc = aio_write(cblock);
		if (rc == 0) {
			break;
		} else if (errno != EAGAIN) {
			/* -1 marks the control block as free/invalid. */
			cblock->aio_fildes = -1;
			pr_err("failed to queue perf data, error: %m\n");
			break;
		}
	} while (1);

	return rc;
}
161
/*
 * Poll one in-flight AIO write.  Returns 0 while the request is still
 * in progress (or was restarted for a short write), 1 when it fully
 * completed and its control block was released.
 */
static int record__aio_complete(struct perf_mmap *md, struct aiocb *cblock)
{
	void *rem_buf;
	off_t rem_off;
	size_t rem_size;
	int rc, aio_errno;
	ssize_t aio_ret, written;

	aio_errno = aio_error(cblock);
	if (aio_errno == EINPROGRESS)
		return 0;

	written = aio_ret = aio_return(cblock);
	if (aio_ret < 0) {
		if (aio_errno != EINTR)
			pr_err("failed to write perf data, error: %m\n");
		/* Treat the failed request as having written nothing. */
		written = 0;
	}

	rem_size = cblock->aio_nbytes - written;

	if (rem_size == 0) {
		/* Fully written: free the control block. */
		cblock->aio_fildes = -1;
		/*
		 * md->refcount is incremented in perf_mmap__push() for
		 * every enqueued aio write request so decrement it because
		 * the request is now complete.
		 */
		perf_mmap__put(md);
		rc = 1;
	} else {
		/*
		 * aio write request may require restart with the
		 * remainder if the kernel didn't write the whole
		 * chunk at once.
		 */
		rem_off = cblock->aio_offset + written;
		rem_buf = (void *)(cblock->aio_buf + written);
		record__aio_write(cblock, cblock->aio_fildes,
				rem_buf, rem_size, rem_off);
		rc = 0;
	}

	return rc;
}
207
/*
 * Wait for this map's AIO writes.  With sync_all == false, return the
 * index of the first free control block (blocking in 1ms aio_suspend()
 * slices until one frees up).  With sync_all == true, drain every
 * in-flight request and return -1 when none remain.
 */
static int record__aio_sync(struct perf_mmap *md, bool sync_all)
{
	struct aiocb **aiocb = md->aio.aiocb;
	struct aiocb *cblocks = md->aio.cblocks;
	struct timespec timeout = { 0, 1000 * 1000 * 1 }; /* 1ms */
	int i, do_suspend;

	do {
		do_suspend = 0;
		for (i = 0; i < md->aio.nr_cblocks; ++i) {
			/* aio_fildes == -1 means the slot is free. */
			if (cblocks[i].aio_fildes == -1 || record__aio_complete(md, &cblocks[i])) {
				if (sync_all)
					aiocb[i] = NULL;
				else
					return i;
			} else {
				/*
				 * Started aio write is not complete yet
				 * so it has to be waited before the
				 * next allocation.
				 */
				aiocb[i] = &cblocks[i];
				do_suspend = 1;
			}
		}
		if (!do_suspend)
			return -1;

		while (aio_suspend((const struct aiocb **)aiocb, md->aio.nr_cblocks, &timeout)) {
			if (!(errno == EAGAIN || errno == EINTR))
				pr_err("failed to sync perf data, error: %m\n");
		}
	} while (1);
}
242
/*
 * perf_mmap__push() callback for the AIO path: queue an asynchronous
 * write of one chunk and account it exactly like record__write() does
 * (bytes_written bump + switch-output size check).  Returns the
 * record__aio_write() result (0 queued, -1 failure).
 */
static int record__aio_pushfn(void *to, struct aiocb *cblock, void *bf, size_t size, off_t off)
{
	struct record *rec = to;
	int ret, trace_fd = rec->session->data->file.fd;

	rec->samples++;

	ret = record__aio_write(cblock, trace_fd, bf, size, off);
	if (!ret) {
		/* Counted when queued, not when completed. */
		rec->bytes_written += size;
		if (switch_output_size(rec))
			trigger_hit(&switch_output_trigger);
	}

	return ret;
}
259
/* Current file position of the trace fd (where the next AIO write lands). */
static off_t record__aio_get_pos(int trace_fd)
{
	off_t pos = lseek(trace_fd, 0, SEEK_CUR);

	return pos;
}
264
/* Reposition the trace fd; callers restore the position after AIO writes. */
static void record__aio_set_pos(int trace_fd, off_t pos)
{
	(void)lseek(trace_fd, pos, SEEK_SET);
}
269
/*
 * Drain all outstanding AIO writes across every mmap'd buffer.  Called
 * before points that require the file contents to be settled; a no-op
 * when AIO was not enabled (nr_cblocks == 0).
 */
static void record__aio_mmap_read_sync(struct record *rec)
{
	int i;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_mmap *maps = evlist->mmap;

	if (!rec->opts.nr_cblocks)
		return;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		struct perf_mmap *map = &maps[i];

		if (map->base)
			record__aio_sync(map, true);
	}
}
286
/* Default and upper bound for the number of AIO control blocks per map. */
static int nr_cblocks_default = 1;
static int nr_cblocks_max = 4;

/*
 * Option callback for --aio[=n]: parse the control-block count into
 * opts->nr_cblocks.  Unset disables AIO (0); a bare flag or an
 * unparseable value falls back to nr_cblocks_default.  Always returns 0.
 */
static int record__aio_parse(const struct option *opt,
			     const char *str,
			     int unset)
{
	struct record_opts *opts = (struct record_opts *)opt->value;

	if (unset) {
		opts->nr_cblocks = 0;
	} else {
		if (str)
			opts->nr_cblocks = strtol(str, NULL, 0);
		if (!opts->nr_cblocks)
			opts->nr_cblocks = nr_cblocks_default;
	}

	return 0;
}
307#else /* HAVE_AIO_SUPPORT */
/*
 * Stubs used when perf is built without POSIX AIO support: AIO is never
 * enabled (nr_cblocks stays 0 and nr_cblocks_max is 0), so these are
 * only here to satisfy the callers' compile-time references.
 */
static int nr_cblocks_max = 0;

static int record__aio_sync(struct perf_mmap *md __maybe_unused, bool sync_all __maybe_unused)
{
	return -1;
}

static int record__aio_pushfn(void *to __maybe_unused, struct aiocb *cblock __maybe_unused,
		void *bf __maybe_unused, size_t size __maybe_unused, off_t off __maybe_unused)
{
	return -1;
}

static off_t record__aio_get_pos(int trace_fd __maybe_unused)
{
	return -1;
}

static void record__aio_set_pos(int trace_fd __maybe_unused, off_t pos __maybe_unused)
{
}

static void record__aio_mmap_read_sync(struct record *rec __maybe_unused)
{
}
333#endif
334
335static int record__aio_enabled(struct record *rec)
336{
337 return rec->opts.nr_cblocks > 0;
338}
339
/* Minimum/default mmap flush threshold, in bytes. */
#define MMAP_FLUSH_DEFAULT 1
/*
 * Option callback for --mmap-flush: parse a size with an optional
 * B/K/M/G suffix (falling back to strtol for plain numbers), default to
 * MMAP_FLUSH_DEFAULT, and clamp to a quarter of the mmap buffer size.
 * Always returns 0.
 */
static int record__mmap_flush_parse(const struct option *opt,
				    const char *str,
				    int unset)
{
	int flush_max;
	struct record_opts *opts = (struct record_opts *)opt->value;
	static struct parse_tag tags[] = {
			{ .tag = 'B', .mult = 1 },
			{ .tag = 'K', .mult = 1 << 10 },
			{ .tag = 'M', .mult = 1 << 20 },
			{ .tag = 'G', .mult = 1 << 30 },
			{ .tag = 0 },
	};

	if (unset)
		return 0;

	if (str) {
		opts->mmap_flush = parse_tag_value(str, tags);
		/* parse_tag_value() signals "no suffix matched" with -1. */
		if (opts->mmap_flush == (int)-1)
			opts->mmap_flush = strtol(str, NULL, 0);
	}

	if (!opts->mmap_flush)
		opts->mmap_flush = MMAP_FLUSH_DEFAULT;

	/* Cap at 1/4 of the ring buffer so flushing stays incremental. */
	flush_max = perf_evlist__mmap_size(opts->mmap_pages);
	flush_max /= 4;
	if (opts->mmap_flush > flush_max)
		opts->mmap_flush = flush_max;

	return 0;
}
374
Alexey Budankov42e1fd82019-03-18 20:41:33 +0300375static int record__comp_enabled(struct record *rec)
376{
377 return rec->opts.comp_level > 0;
378}
379
/*
 * perf_tool callback: write a synthesized event straight to the output
 * file (no mmap involved, hence the NULL map argument).
 */
static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, NULL, event, event->header.size);
}
388
/*
 * perf_mmap__push() callback for the synchronous path: count the chunk
 * as a sample batch and write it out via record__write().
 */
static int record__pushfn(struct perf_mmap *map, void *to, void *bf, size_t size)
{
	struct record *rec = to;

	rec->samples++;
	return record__write(rec, map, bf, size);
}
396
/* Flags set from signal context; polled by the record main loop. */
static volatile int done;
static volatile int signr = -1;
static volatile int child_finished;

/*
 * Termination handler: note whether the workload child exited
 * (SIGCHLD) or which terminating signal arrived, then ask the main
 * loop to stop.
 */
static void sig_handler(int sig)
{
	if (sig != SIGCHLD)
		signr = sig;
	else
		child_finished = 1;

	done = 1;
}
410
/*
 * SIGSEGV handler: let perf-hooks unwind any hook in progress, then
 * dump a stack trace for diagnosis.
 */
static void sigsegv_handler(int sig)
{
	perf_hooks__recover();
	sighandler_dump_stack(sig);
}
416
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300417static void record__sig_exit(void)
418{
419 if (signr == -1)
420 return;
421
422 signal(signr, SIG_DFL);
423 raise(signr);
424}
425
Adrian Huntere31f0d02015-04-30 17:37:27 +0300426#ifdef HAVE_AUXTRACE_SUPPORT
427
/*
 * Write one AUX area trace event plus its (up to two-part, ring-buffer
 * wrapped) payload to the output, padding the data to an 8-byte
 * boundary.  For seekable single-file output, also record the event's
 * file offset in the session's auxtrace index first.
 *
 * Returns 0 on success or a negative error.
 */
static int record__process_auxtrace(struct perf_tool *tool,
				    struct perf_mmap *map,
				    union perf_event *event, void *data1,
				    size_t len1, void *data2, size_t len2)
{
	struct record *rec = container_of(tool, struct record, tool);
	struct perf_data *data = &rec->data;
	size_t padding;
	u8 pad[8] = {0};

	/* Pipes/directories aren't seekable, so no offset index there. */
	if (!perf_data__is_pipe(data) && !perf_data__is_dir(data)) {
		off_t file_offset;
		int fd = perf_data__fd(data);
		int err;

		file_offset = lseek(fd, 0, SEEK_CUR);
		if (file_offset == -1)
			return -1;
		err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
						     event, file_offset);
		if (err)
			return err;
	}

	/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
	padding = (len1 + len2) & 7;
	if (padding)
		padding = 8 - padding;

	record__write(rec, map, event, event->header.size);
	record__write(rec, map, data1, len1);
	if (len2)
		record__write(rec, map, data2, len2);
	record__write(rec, map, &pad, padding);

	return 0;
}
465
/*
 * Drain one AUX area mmap into the output via record__process_auxtrace.
 * A positive return from auxtrace_mmap__read() means data was consumed,
 * which we count as a sample batch.  Returns 0 or a negative error.
 */
static int record__auxtrace_mmap_read(struct record *rec,
				      struct perf_mmap *map)
{
	int ret;

	ret = auxtrace_mmap__read(map, rec->itr, &rec->tool,
				  record__process_auxtrace);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}
481
/*
 * Snapshot-mode variant of record__auxtrace_mmap_read(): capture at
 * most auxtrace_snapshot_size bytes from one AUX mmap.  Returns 0 or a
 * negative error.
 */
static int record__auxtrace_mmap_read_snapshot(struct record *rec,
					       struct perf_mmap *map)
{
	int ret;

	ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool,
					   record__process_auxtrace,
					   rec->opts.auxtrace_snapshot_size);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}
498
/*
 * Take a snapshot from every mmap that has an AUX area, stopping at the
 * first failure.  Returns 0 on success, -1 on error.
 */
static int record__auxtrace_read_snapshot_all(struct record *rec)
{
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		struct perf_mmap *map = &rec->evlist->mmap[i];

		/* Skip maps without an AUX area. */
		if (!map->auxtrace_mmap.base)
			continue;

		if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) {
			rc = -1;
			goto out;
		}
	}
out:
	return rc;
}
518
/*
 * Service a snapshot request: read all AUX areas, then either re-arm
 * the snapshot trigger for the next request or put it into the error
 * state if reading or finishing the snapshot failed.
 */
static void record__read_auxtrace_snapshot(struct record *rec)
{
	pr_debug("Recording AUX area tracing snapshot\n");
	if (record__auxtrace_read_snapshot_all(rec) < 0) {
		trigger_error(&auxtrace_snapshot_trigger);
	} else {
		if (auxtrace_record__snapshot_finish(rec->itr))
			trigger_error(&auxtrace_snapshot_trigger);
		else
			trigger_ready(&auxtrace_snapshot_trigger);
	}
}
531
/*
 * One-time AUX area setup: create the auxtrace recording context if
 * needed, parse the snapshot options, and install any auxtrace event
 * filters.  Returns 0 on success or a negative error.
 */
static int record__auxtrace_init(struct record *rec)
{
	int err;

	if (!rec->itr) {
		rec->itr = auxtrace_record__init(rec->evlist, &err);
		if (err)
			return err;
	}

	err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
					      rec->opts.auxtrace_snapshot_opts);
	if (err)
		return err;

	return auxtrace_parse_filters(rec->evlist);
}
549
Adrian Huntere31f0d02015-04-30 17:37:27 +0300550#else
551
/*
 * No-op stubs used when perf is built without AUX area tracing support;
 * they keep the main record path free of #ifdefs.
 */
static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
			       struct perf_mmap *map __maybe_unused)
{
	return 0;
}

static inline
void record__read_auxtrace_snapshot(struct record *rec __maybe_unused)
{
}

static inline
int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
{
	return 0;
}

static int record__auxtrace_init(struct record *rec __maybe_unused)
{
	return 0;
}
574
Adrian Huntere31f0d02015-04-30 17:37:27 +0300575#endif
576
/*
 * mmap the evlist's ring buffers according to the recording options
 * (buffer sizes, AUX area, AIO cblocks, affinity, flush threshold).
 * Builds the cpu->node map first when NUMA/CPU affinity is requested.
 * Returns 0 on success or a negative errno, with a user-facing
 * diagnostic printed on failure.
 */
static int record__mmap_evlist(struct record *rec,
			       struct perf_evlist *evlist)
{
	struct record_opts *opts = &rec->opts;
	char msg[512];

	/* Needed so per-map affinity masks can be derived from topology. */
	if (opts->affinity != PERF_AFFINITY_SYS)
		cpu__setup_cpunode_map();

	if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
				 opts->auxtrace_mmap_pages,
				 opts->auxtrace_snapshot_mode,
				 opts->nr_cblocks, opts->affinity,
				 opts->mmap_flush) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u,%u)\n",
			       opts->mmap_pages, opts->auxtrace_mmap_pages);
			return -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno,
				str_error_r(errno, msg, sizeof(msg)));
			if (errno)
				return -errno;
			else
				return -EINVAL;
		}
	}
	return 0;
}
610
/* Convenience wrapper: mmap the session's own evlist. */
static int record__mmap(struct record *rec)
{
	return record__mmap_evlist(rec, rec->evlist);
}
615
/*
 * Open all events for recording: configure the evlist, open each evsel
 * (with fallback and weak-group retry on failure), apply event filters,
 * and mmap the ring buffers.  On success the session takes the evlist
 * and its id header size is set.  Returns 0 or a negative error.
 */
static int record__open(struct record *rec)
{
	char msg[BUFSIZ];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	int rc = 0;

	/*
	 * For initial_delay we need to add a dummy event so that we can track
	 * PERF_RECORD_MMAP while we wait for the initial delay to enable the
	 * real events, the ones asked by the user.
	 */
	if (opts->initial_delay) {
		if (perf_evlist__add_dummy(evlist))
			return -ENOMEM;

		/* Dummy (first) tracks immediately; real events wait for exec. */
		pos = perf_evlist__first(evlist);
		pos->tracking = 0;
		pos = perf_evlist__last(evlist);
		pos->tracking = 1;
		pos->attr.enable_on_exec = 1;
	}

	perf_evlist__config(evlist, opts, &callchain_param);

	evlist__for_each_entry(evlist, pos) {
try_again:
		if (perf_evsel__open(pos, pos->cpus, pos->threads) < 0) {
			/* Try a degraded config (e.g. drop unsupported bits). */
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose > 0)
					ui__warning("%s\n", msg);
				goto try_again;
			}
			/* Weak group member failed: break up the group and retry. */
			if ((errno == EINVAL || errno == EBADF) &&
			    pos->leader != pos &&
			    pos->weak_group) {
				pos = perf_evlist__reset_weak_group(evlist, pos);
				goto try_again;
			}
			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}

		pos->supported = true;
	}

	if (perf_evlist__apply_filters(evlist, &pos)) {
		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
			pos->filter, perf_evsel__name(pos), errno,
			str_error_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	rc = record__mmap(rec);
	if (rc)
		goto out;

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}
684
/*
 * Sample callback used while post-processing the recorded file: track
 * the first/last sample timestamps and, unless --buildid-all made
 * per-sample work unnecessary, mark the sample's DSO as hit for
 * build-id collection.
 */
static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	struct record *rec = container_of(tool, struct record, tool);

	if (rec->evlist->first_sample_time == 0)
		rec->evlist->first_sample_time = sample->time;

	rec->evlist->last_sample_time = sample->time;

	/* With --buildid-all every DSO is marked anyway; skip the per-sample work. */
	if (rec->buildid_all)
		return 0;

	rec->samples++;
	return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
}
704
/*
 * Re-read the just-recorded data to collect build-ids (and sample time
 * boundaries).  A zero-sized output is a no-op.  Returns the session
 * processing result.
 */
static int process_buildids(struct record *rec)
{
	struct perf_session *session = rec->session;

	if (perf_data__size(&rec->data) == 0)
		return 0;

	/*
	 * During this process, it'll load kernel map and replace the
	 * dso->long_name to a real pathname it found.  In this case
	 * we prefer the vmlinux path like
	 *   /lib/modules/3.16.4/build/vmlinux
	 *
	 * rather than build-id path (in debug directory).
	 *   $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
	 */
	symbol_conf.ignore_vmlinux_buildid = true;

	/*
	 * If --buildid-all is given, it marks all DSO regardless of hits,
	 * so no need to process samples. But if timestamp_boundary is enabled,
	 * it still needs to walk on all samples to get the timestamps of
	 * first/last samples.
	 */
	if (rec->buildid_all && !rec->timestamp_boundary)
		rec->tool.sample = NULL;

	return perf_session__process_events(session);
}
734
/*
 * machines__process_guests() callback: synthesize module and kernel
 * mmap events for one guest machine so its symbols can be resolved at
 * report time.
 */
static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * As for guest kernel when processing subcommand record & report,
	 * we arrange module mmap prior to guest kernel mmap and trigger
	 * a preload dso because default guest module symbols are loaded
	 * from guest kallsyms instead of /lib/modules/XXX/XXX.  This
	 * method is used to avoid symbol missing when the first addr is
	 * in module instead of in guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
	 * have no _text sometimes.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}
763
/* Header-only PERF_RECORD_FINISHED_ROUND marker written between flush rounds. */
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};
768
/*
 * When per-map affinity is enabled, migrate the recording thread onto
 * the map's preferred CPU set (if it differs from the current one)
 * before draining that map, to keep buffer reads NUMA/CPU local.
 */
static void record__adjust_affinity(struct record *rec, struct perf_mmap *map)
{
	if (rec->opts.affinity != PERF_AFFINITY_SYS &&
	    !CPU_EQUAL(&rec->affinity_mask, &map->affinity_mask)) {
		CPU_ZERO(&rec->affinity_mask);
		CPU_OR(&rec->affinity_mask, &rec->affinity_mask, &map->affinity_mask);
		sched_setaffinity(0, sizeof(rec->affinity_mask), &rec->affinity_mask);
	}
}
778
/*
 * Drain every mmap'ed ring buffer of @evlist into the output file.
 *
 * @overwrite: operate on the overwritable (backward) buffers instead of the
 *             regular ones; only done when they are in DATA_PENDING state.
 * @synch:     force a full flush by temporarily dropping each map's flush
 *             threshold to 1 byte for the duration of the push.
 *
 * Returns 0 on success, -1 on any push/read failure.
 */
static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
				    bool overwrite, bool synch)
{
	u64 bytes_written = rec->bytes_written;	/* snapshot to detect progress */
	int i;
	int rc = 0;
	struct perf_mmap *maps;
	int trace_fd = rec->data.file.fd;
	off_t off;	/* only valid/used when AIO is enabled */

	if (!evlist)
		return 0;

	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
	if (!maps)
		return 0;

	/* Overwrite buffers are read only once their data has been frozen. */
	if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
		return 0;

	if (record__aio_enabled(rec))
		off = record__aio_get_pos(trace_fd);

	for (i = 0; i < evlist->nr_mmaps; i++) {
		u64 flush = 0;
		struct perf_mmap *map = &maps[i];

		if (map->base) {
			record__adjust_affinity(rec, map);
			if (synch) {
				/* save the threshold; restored on every exit path */
				flush = map->flush;
				map->flush = 1;
			}
			if (!record__aio_enabled(rec)) {
				if (perf_mmap__push(map, rec, record__pushfn) != 0) {
					if (synch)
						map->flush = flush;
					rc = -1;
					goto out;
				}
			} else {
				int idx;
				/*
				 * Call record__aio_sync() to wait till map->data buffer
				 * becomes available after previous aio write request.
				 */
				idx = record__aio_sync(map, false);
				if (perf_mmap__aio_push(map, rec, idx, record__aio_pushfn, &off) != 0) {
					/* persist the advanced file offset before bailing */
					record__aio_set_pos(trace_fd, off);
					if (synch)
						map->flush = flush;
					rc = -1;
					goto out;
				}
			}
			if (synch)
				map->flush = flush;
		}

		/* AUX area data is read inline except in snapshot mode. */
		if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
		    record__auxtrace_mmap_read(rec, map) != 0) {
			rc = -1;
			goto out;
		}
	}

	if (record__aio_enabled(rec))
		record__aio_set_pos(trace_fd, off);

	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, NULL, &finished_round_event, sizeof(finished_round_event));

	if (overwrite)
		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
out:
	return rc;
}
860
Alexey Budankov470530b2019-03-18 20:40:26 +0300861static int record__mmap_read_all(struct record *rec, bool synch)
Wang Nancb216862016-06-27 10:24:04 +0000862{
863 int err;
864
Alexey Budankov470530b2019-03-18 20:40:26 +0300865 err = record__mmap_read_evlist(rec, rec->evlist, false, synch);
Wang Nancb216862016-06-27 10:24:04 +0000866 if (err)
867 return err;
868
Alexey Budankov470530b2019-03-18 20:40:26 +0300869 return record__mmap_read_evlist(rec, rec->evlist, true, synch);
Wang Nancb216862016-06-27 10:24:04 +0000870}
871
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -0300872static void record__init_features(struct record *rec)
David Ahern57706ab2013-11-06 11:41:34 -0700873{
David Ahern57706ab2013-11-06 11:41:34 -0700874 struct perf_session *session = rec->session;
875 int feat;
876
877 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
878 perf_header__set_feat(&session->header, feat);
879
880 if (rec->no_buildid)
881 perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
882
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -0300883 if (!have_tracepoints(&rec->evlist->entries))
David Ahern57706ab2013-11-06 11:41:34 -0700884 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
885
886 if (!rec->opts.branch_stack)
887 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
Adrian Hunteref149c22015-04-09 18:53:45 +0300888
889 if (!rec->opts.full_auxtrace)
890 perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
Jiri Olsaffa517a2015-10-25 15:51:43 +0100891
Alexey Budankovcf790512018-10-09 17:36:24 +0300892 if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
893 perf_header__clear_feat(&session->header, HEADER_CLOCKID);
894
Jiri Olsa258031c2019-03-08 14:47:39 +0100895 perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
Alexey Budankov42e1fd82019-03-18 20:41:33 +0300896 if (!record__comp_enabled(rec))
897 perf_header__clear_feat(&session->header, HEADER_COMPRESSED);
Jiri Olsa258031c2019-03-08 14:47:39 +0100898
Jiri Olsaffa517a2015-10-25 15:51:43 +0100899 perf_header__clear_feat(&session->header, HEADER_STAT);
David Ahern57706ab2013-11-06 11:41:34 -0700900}
901
Wang Nane1ab48b2016-02-26 09:32:10 +0000902static void
903record__finish_output(struct record *rec)
904{
Jiri Olsa8ceb41d2017-01-23 22:07:59 +0100905 struct perf_data *data = &rec->data;
906 int fd = perf_data__fd(data);
Wang Nane1ab48b2016-02-26 09:32:10 +0000907
Jiri Olsa8ceb41d2017-01-23 22:07:59 +0100908 if (data->is_pipe)
Wang Nane1ab48b2016-02-26 09:32:10 +0000909 return;
910
911 rec->session->header.data_size += rec->bytes_written;
Jiri Olsa45112e82019-02-21 10:41:29 +0100912 data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR);
Wang Nane1ab48b2016-02-26 09:32:10 +0000913
914 if (!rec->no_buildid) {
915 process_buildids(rec);
916
917 if (rec->buildid_all)
918 dsos__hit_all(rec->session);
919 }
920 perf_session__write_header(rec->session, rec->evlist, fd, true);
921
922 return;
923}
924
Wang Nan4ea648a2016-07-14 08:34:47 +0000925static int record__synthesize_workload(struct record *rec, bool tail)
Wang Nanbe7b0c92016-04-20 18:59:54 +0000926{
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -0300927 int err;
928 struct thread_map *thread_map;
Wang Nanbe7b0c92016-04-20 18:59:54 +0000929
Wang Nan4ea648a2016-07-14 08:34:47 +0000930 if (rec->opts.tail_synthesize != tail)
931 return 0;
932
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -0300933 thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
934 if (thread_map == NULL)
935 return -1;
936
937 err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
Wang Nanbe7b0c92016-04-20 18:59:54 +0000938 process_synthesized_event,
939 &rec->session->machines.host,
Mark Drayton3fcb10e2018-12-04 12:34:20 -0800940 rec->opts.sample_address);
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -0300941 thread_map__put(thread_map);
942 return err;
Wang Nanbe7b0c92016-04-20 18:59:54 +0000943}
944
Wang Nan4ea648a2016-07-14 08:34:47 +0000945static int record__synthesize(struct record *rec, bool tail);
Wang Nan3c1cb7e2016-04-20 18:59:50 +0000946
Wang Nanecfd7a92016-04-13 08:21:07 +0000947static int
948record__switch_output(struct record *rec, bool at_exit)
949{
Jiri Olsa8ceb41d2017-01-23 22:07:59 +0100950 struct perf_data *data = &rec->data;
Wang Nanecfd7a92016-04-13 08:21:07 +0000951 int fd, err;
Andi Kleen03724b22019-03-14 15:49:55 -0700952 char *new_filename;
Wang Nanecfd7a92016-04-13 08:21:07 +0000953
954 /* Same Size: "2015122520103046"*/
955 char timestamp[] = "InvalidTimestamp";
956
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300957 record__aio_mmap_read_sync(rec);
958
Wang Nan4ea648a2016-07-14 08:34:47 +0000959 record__synthesize(rec, true);
960 if (target__none(&rec->opts.target))
961 record__synthesize_workload(rec, true);
962
Wang Nanecfd7a92016-04-13 08:21:07 +0000963 rec->samples = 0;
964 record__finish_output(rec);
965 err = fetch_current_timestamp(timestamp, sizeof(timestamp));
966 if (err) {
967 pr_err("Failed to get current timestamp\n");
968 return -EINVAL;
969 }
970
Jiri Olsa8ceb41d2017-01-23 22:07:59 +0100971 fd = perf_data__switch(data, timestamp,
Wang Nanecfd7a92016-04-13 08:21:07 +0000972 rec->session->header.data_offset,
Andi Kleen03724b22019-03-14 15:49:55 -0700973 at_exit, &new_filename);
Wang Nanecfd7a92016-04-13 08:21:07 +0000974 if (fd >= 0 && !at_exit) {
975 rec->bytes_written = 0;
976 rec->session->header.data_size = 0;
977 }
978
979 if (!quiet)
980 fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
Jiri Olsa2d4f2792019-02-21 10:41:30 +0100981 data->path, timestamp);
Wang Nan3c1cb7e2016-04-20 18:59:50 +0000982
Andi Kleen03724b22019-03-14 15:49:55 -0700983 if (rec->switch_output.num_files) {
984 int n = rec->switch_output.cur_file + 1;
985
986 if (n >= rec->switch_output.num_files)
987 n = 0;
988 rec->switch_output.cur_file = n;
989 if (rec->switch_output.filenames[n]) {
990 remove(rec->switch_output.filenames[n]);
991 free(rec->switch_output.filenames[n]);
992 }
993 rec->switch_output.filenames[n] = new_filename;
994 } else {
995 free(new_filename);
996 }
997
Wang Nan3c1cb7e2016-04-20 18:59:50 +0000998 /* Output tracking events */
Wang Nanbe7b0c92016-04-20 18:59:54 +0000999 if (!at_exit) {
Wang Nan4ea648a2016-07-14 08:34:47 +00001000 record__synthesize(rec, false);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001001
Wang Nanbe7b0c92016-04-20 18:59:54 +00001002 /*
1003 * In 'perf record --switch-output' without -a,
1004 * record__synthesize() in record__switch_output() won't
1005 * generate tracking events because there's no thread_map
1006 * in evlist. Which causes newly created perf.data doesn't
1007 * contain map and comm information.
1008 * Create a fake thread_map and directly call
1009 * perf_event__synthesize_thread_map() for those events.
1010 */
1011 if (target__none(&rec->opts.target))
Wang Nan4ea648a2016-07-14 08:34:47 +00001012 record__synthesize_workload(rec, false);
Wang Nanbe7b0c92016-04-20 18:59:54 +00001013 }
Wang Nanecfd7a92016-04-13 08:21:07 +00001014 return fd;
1015}
1016
/* errno of a failed workload exec, delivered by the SIGUSR1 handler below. */
static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	/* the sender queued the child's errno in the signal's value payload */
	workload_exec_errno = info->si_value.sival_int;
	/* make the main record loop terminate and treat the child as gone */
	done = 1;
	child_finished = 1;
}
1032
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001033static void snapshot_sig_handler(int sig);
Jiri Olsabfacbe32017-01-09 10:52:00 +01001034static void alarm_sig_handler(int sig);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001035
/*
 * Default (weak) stub for synthesizing the time-conversion event; does
 * nothing and reports success.  Architectures that can derive TSC/clock
 * conversion data from the mmap page override this symbol.
 */
int __weak
perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
			    struct perf_tool *tool __maybe_unused,
			    perf_event__handler_t process __maybe_unused,
			    struct machine *machine __maybe_unused)
{
	return 0;
}
1044
Wang Nanee667f92016-06-27 10:24:05 +00001045static const struct perf_event_mmap_page *
1046perf_evlist__pick_pc(struct perf_evlist *evlist)
1047{
Wang Nanb2cb6152016-07-14 08:34:39 +00001048 if (evlist) {
1049 if (evlist->mmap && evlist->mmap[0].base)
1050 return evlist->mmap[0].base;
Wang Nan0b72d692017-12-04 16:51:07 +00001051 if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].base)
1052 return evlist->overwrite_mmap[0].base;
Wang Nanb2cb6152016-07-14 08:34:39 +00001053 }
Wang Nanee667f92016-06-27 10:24:05 +00001054 return NULL;
1055}
1056
Wang Nanc45628b2016-05-24 02:28:59 +00001057static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
1058{
Wang Nanee667f92016-06-27 10:24:05 +00001059 const struct perf_event_mmap_page *pc;
1060
1061 pc = perf_evlist__pick_pc(rec->evlist);
1062 if (pc)
1063 return pc;
Wang Nanc45628b2016-05-24 02:28:59 +00001064 return NULL;
1065}
1066
/*
 * Emit all the synthetic (non-hardware) events that describe the system
 * state at record time: attrs/features (pipe mode), tracing data, kernel
 * and module maps, guest machines, thread and cpu maps, BPF events and
 * existing threads.  Skipped entirely unless @tail matches the
 * opts.tail_synthesize phase.  Returns 0 on success or a negative error.
 */
static int record__synthesize(struct record *rec, bool tail)
{
	struct perf_session *session = rec->session;
	struct machine *machine = &session->machines.host;
	struct perf_data *data = &rec->data;
	struct record_opts *opts = &rec->opts;
	struct perf_tool *tool = &rec->tool;
	int fd = perf_data__fd(data);
	int err = 0;

	if (rec->opts.tail_synthesize != tail)
		return 0;

	if (data->is_pipe) {
		/*
		 * We need to synthesize events first, because some
		 * features works on top of them (on report side).
		 */
		err = perf_event__synthesize_attrs(tool, rec->evlist,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out;
		}

		err = perf_event__synthesize_features(tool, session, rec->evlist,
						      process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize features.\n");
			/* NOTE(review): returns directly rather than goto out — same effect, out just returns err */
			return err;
		}

		if (have_tracepoints(&rec->evlist->entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so its not really
			 * an error, just that we don't need to
			 * synthesize anything.  We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out;
			}
			/* tracing data goes into the data section; account for it */
			rec->bytes_written += err;
		}
	}

	err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
					  process_synthesized_event, machine);
	if (err)
		goto out;

	if (rec->opts.full_auxtrace) {
		err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
					session, process_synthesized_event);
		if (err)
			goto out;
	}

	/* Kernel map/module events are pointless if only user space is sampled. */
	if (!perf_evlist__exclude_kernel(rec->evlist)) {
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine);
		WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
				   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
				   "Check /proc/kallsyms permission or run as root.\n");

		err = perf_event__synthesize_modules(tool, process_synthesized_event,
						     machine);
		WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
				   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
				   "Check /proc/modules permission or run as root.\n");
	}

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	err = perf_event__synthesize_extra_attr(&rec->tool,
						rec->evlist,
						process_synthesized_event,
						data->is_pipe);
	if (err)
		goto out;

	err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->threads,
						 process_synthesized_event,
						NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize thread map.\n");
		return err;
	}

	err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->cpus,
					     process_synthesized_event, NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize cpu map.\n");
		return err;
	}

	/* BPF event synthesis failure is non-fatal: warn and continue. */
	err = perf_event__synthesize_bpf_events(session, process_synthesized_event,
						machine, opts);
	if (err < 0)
		pr_warning("Couldn't synthesize bpf events.\n");

	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
					    process_synthesized_event, opts->sample_address,
					    1);
out:
	return err;
}
1182
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001183static int __cmd_record(struct record *rec, int argc, const char **argv)
Peter Zijlstra16c8a102009-05-05 17:50:27 +02001184{
David Ahern57706ab2013-11-06 11:41:34 -07001185 int err;
Namhyung Kim45604712014-05-12 09:47:24 +09001186 int status = 0;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001187 unsigned long waking = 0;
Zhang, Yanmin46be6042010-03-18 11:36:04 -03001188 const bool forks = argc > 0;
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02001189 struct perf_tool *tool = &rec->tool;
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03001190 struct record_opts *opts = &rec->opts;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001191 struct perf_data *data = &rec->data;
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001192 struct perf_session *session;
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001193 bool disabled = false, draining = false;
Song Liu657ee552019-03-11 22:30:50 -07001194 struct perf_evlist *sb_evlist = NULL;
Namhyung Kim42aa2762015-01-29 17:06:48 +09001195 int fd;
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001196 float ratio = 0;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001197
Namhyung Kim45604712014-05-12 09:47:24 +09001198 atexit(record__sig_exit);
Peter Zijlstraf5970552009-06-18 23:22:55 +02001199 signal(SIGCHLD, sig_handler);
1200 signal(SIGINT, sig_handler);
David Ahern804f7ac2013-05-06 12:24:23 -06001201 signal(SIGTERM, sig_handler);
Wang Nana0748652016-11-26 07:03:28 +00001202 signal(SIGSEGV, sigsegv_handler);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00001203
Hari Bathinif3b36142017-03-08 02:11:43 +05301204 if (rec->opts.record_namespaces)
1205 tool->namespace_events = true;
1206
Jiri Olsadc0c6122017-01-09 10:51:58 +01001207 if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001208 signal(SIGUSR2, snapshot_sig_handler);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001209 if (rec->opts.auxtrace_snapshot_mode)
1210 trigger_on(&auxtrace_snapshot_trigger);
Jiri Olsadc0c6122017-01-09 10:51:58 +01001211 if (rec->switch_output.enabled)
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001212 trigger_on(&switch_output_trigger);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00001213 } else {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001214 signal(SIGUSR2, SIG_IGN);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00001215 }
Peter Zijlstraf5970552009-06-18 23:22:55 +02001216
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001217 session = perf_session__new(data, false, tool);
Arnaldo Carvalho de Melo94c744b2009-12-11 21:24:02 -02001218 if (session == NULL) {
Adrien BAKffa91882014-04-18 11:00:43 +09001219 pr_err("Perf session creation failed.\n");
Arnaldo Carvalho de Meloa9a70bb2009-11-17 01:18:11 -02001220 return -1;
1221 }
1222
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001223 fd = perf_data__fd(data);
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001224 rec->session = session;
1225
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001226 record__init_features(rec);
Stephane Eranian330aa672012-03-08 23:47:46 +01001227
Alexey Budankovcf790512018-10-09 17:36:24 +03001228 if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
1229 session->header.env.clockid_res_ns = rec->opts.clockid_res_ns;
1230
Arnaldo Carvalho de Melod4db3f12009-12-27 21:36:57 -02001231 if (forks) {
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001232 err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001233 argv, data->is_pipe,
Arnaldo Carvalho de Melo735f7e02014-01-03 14:56:49 -03001234 workload_exec_failed_signal);
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001235 if (err < 0) {
1236 pr_err("Couldn't run the workload!\n");
Namhyung Kim45604712014-05-12 09:47:24 +09001237 status = err;
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001238 goto out_delete_session;
Jens Axboe0a5ac842009-08-12 11:18:01 +02001239 }
Peter Zijlstra856e9662009-12-16 17:55:55 +01001240 }
1241
Jiri Olsaad46e48c2018-03-02 17:13:54 +01001242 /*
1243 * If we have just single event and are sending data
1244 * through pipe, we need to force the ids allocation,
1245 * because we synthesize event name through the pipe
1246 * and need the id for that.
1247 */
1248 if (data->is_pipe && rec->evlist->nr_entries == 1)
1249 rec->opts.sample_id = true;
1250
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001251 if (record__open(rec) != 0) {
David Ahern8d3eca22012-08-26 12:24:47 -06001252 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001253 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001254 }
Alexey Budankov42e1fd82019-03-18 20:41:33 +03001255 session->header.env.comp_mmap_len = session->evlist->mmap_len;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001256
Wang Nan8690a2a2016-02-22 09:10:32 +00001257 err = bpf__apply_obj_config();
1258 if (err) {
1259 char errbuf[BUFSIZ];
1260
1261 bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
1262 pr_err("ERROR: Apply config to BPF failed: %s\n",
1263 errbuf);
1264 goto out_child;
1265 }
1266
Adrian Huntercca84822015-08-19 17:29:21 +03001267 /*
1268 * Normally perf_session__new would do this, but it doesn't have the
1269 * evlist.
1270 */
1271 if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
1272 pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
1273 rec->tool.ordered_events = false;
1274 }
1275
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001276 if (!rec->evlist->nr_groups)
Namhyung Kima8bb5592013-01-22 18:09:31 +09001277 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
1278
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001279 if (data->is_pipe) {
Namhyung Kim42aa2762015-01-29 17:06:48 +09001280 err = perf_header__write_pipe(fd);
Tom Zanussi529870e2010-04-01 23:59:16 -05001281 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001282 goto out_child;
Jiri Olsa563aecb2013-06-05 13:35:06 +02001283 } else {
Namhyung Kim42aa2762015-01-29 17:06:48 +09001284 err = perf_session__write_header(session, rec->evlist, fd, false);
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02001285 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001286 goto out_child;
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02001287 }
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02001288
David Ahernd3665492012-02-06 15:27:52 -07001289 if (!rec->no_buildid
Robert Richtere20960c2011-12-07 10:02:55 +01001290 && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
David Ahernd3665492012-02-06 15:27:52 -07001291 pr_err("Couldn't generate buildids. "
Robert Richtere20960c2011-12-07 10:02:55 +01001292 "Use --no-buildid to profile anyway.\n");
David Ahern8d3eca22012-08-26 12:24:47 -06001293 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001294 goto out_child;
Robert Richtere20960c2011-12-07 10:02:55 +01001295 }
1296
Song Liud56354d2019-03-11 22:30:51 -07001297 if (!opts->no_bpf_event)
1298 bpf_event__add_sb_event(&sb_evlist, &session->header.env);
1299
Song Liu657ee552019-03-11 22:30:50 -07001300 if (perf_evlist__start_sb_thread(sb_evlist, &rec->opts.target)) {
1301 pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
1302 opts->no_bpf_event = true;
1303 }
1304
Wang Nan4ea648a2016-07-14 08:34:47 +00001305 err = record__synthesize(rec, false);
Wang Nanc45c86e2016-02-26 09:32:07 +00001306 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001307 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001308
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001309 if (rec->realtime_prio) {
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001310 struct sched_param param;
1311
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001312 param.sched_priority = rec->realtime_prio;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001313 if (sched_setscheduler(0, SCHED_FIFO, &param)) {
Arnaldo Carvalho de Melo6beba7a2009-10-21 17:34:06 -02001314 pr_err("Could not set realtime priority.\n");
David Ahern8d3eca22012-08-26 12:24:47 -06001315 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001316 goto out_child;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001317 }
1318 }
1319
Jiri Olsa774cb492012-11-12 18:34:01 +01001320 /*
1321 * When perf is starting the traced process, all the events
1322 * (apart from group members) have enable_on_exec=1 set,
1323 * so don't spoil it by prematurely enabling them.
1324 */
Andi Kleen6619a532014-01-11 13:38:27 -08001325 if (!target__none(&opts->target) && !opts->initial_delay)
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001326 perf_evlist__enable(rec->evlist);
David Ahern764e16a32011-08-25 10:17:55 -06001327
Peter Zijlstra856e9662009-12-16 17:55:55 +01001328 /*
1329 * Let the child rip
1330 */
Namhyung Kime803cf92015-09-22 09:24:55 +09001331 if (forks) {
Jiri Olsa20a8a3c2018-03-07 16:50:04 +01001332 struct machine *machine = &session->machines.host;
Namhyung Kime5bed562015-09-30 10:45:24 +09001333 union perf_event *event;
Hari Bathinie907caf2017-03-08 02:11:51 +05301334 pid_t tgid;
Namhyung Kime5bed562015-09-30 10:45:24 +09001335
1336 event = malloc(sizeof(event->comm) + machine->id_hdr_size);
1337 if (event == NULL) {
1338 err = -ENOMEM;
1339 goto out_child;
1340 }
1341
Namhyung Kime803cf92015-09-22 09:24:55 +09001342 /*
1343 * Some H/W events are generated before COMM event
1344 * which is emitted during exec(), so perf script
1345 * cannot see a correct process name for those events.
1346 * Synthesize COMM event to prevent it.
1347 */
Hari Bathinie907caf2017-03-08 02:11:51 +05301348 tgid = perf_event__synthesize_comm(tool, event,
1349 rec->evlist->workload.pid,
1350 process_synthesized_event,
1351 machine);
1352 free(event);
1353
1354 if (tgid == -1)
1355 goto out_child;
1356
1357 event = malloc(sizeof(event->namespaces) +
1358 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
1359 machine->id_hdr_size);
1360 if (event == NULL) {
1361 err = -ENOMEM;
1362 goto out_child;
1363 }
1364
1365 /*
1366 * Synthesize NAMESPACES event for the command specified.
1367 */
1368 perf_event__synthesize_namespaces(tool, event,
1369 rec->evlist->workload.pid,
1370 tgid, process_synthesized_event,
1371 machine);
Namhyung Kime5bed562015-09-30 10:45:24 +09001372 free(event);
Namhyung Kime803cf92015-09-22 09:24:55 +09001373
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001374 perf_evlist__start_workload(rec->evlist);
Namhyung Kime803cf92015-09-22 09:24:55 +09001375 }
Peter Zijlstra856e9662009-12-16 17:55:55 +01001376
Andi Kleen6619a532014-01-11 13:38:27 -08001377 if (opts->initial_delay) {
Arnaldo Carvalho de Melo0693e682016-08-08 15:05:46 -03001378 usleep(opts->initial_delay * USEC_PER_MSEC);
Andi Kleen6619a532014-01-11 13:38:27 -08001379 perf_evlist__enable(rec->evlist);
1380 }
1381
Wang Nan5f9cf592016-04-20 18:59:49 +00001382 trigger_ready(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001383 trigger_ready(&switch_output_trigger);
Wang Nana0748652016-11-26 07:03:28 +00001384 perf_hooks__invoke_record_start();
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001385 for (;;) {
Yang Shi9f065192015-09-29 14:49:43 -07001386 unsigned long long hits = rec->samples;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001387
Wang Nan057374642016-07-14 08:34:43 +00001388 /*
1389 * rec->evlist->bkw_mmap_state is possible to be
1390 * BKW_MMAP_EMPTY here: when done == true and
1391 * hits != rec->samples in previous round.
1392 *
1393 * perf_evlist__toggle_bkw_mmap ensure we never
1394 * convert BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
1395 */
1396 if (trigger_is_hit(&switch_output_trigger) || done || draining)
1397 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);
1398
Alexey Budankov470530b2019-03-18 20:40:26 +03001399 if (record__mmap_read_all(rec, false) < 0) {
Wang Nan5f9cf592016-04-20 18:59:49 +00001400 trigger_error(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001401 trigger_error(&switch_output_trigger);
David Ahern8d3eca22012-08-26 12:24:47 -06001402 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001403 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001404 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001405
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001406 if (auxtrace_record__snapshot_started) {
1407 auxtrace_record__snapshot_started = 0;
Wang Nan5f9cf592016-04-20 18:59:49 +00001408 if (!trigger_is_error(&auxtrace_snapshot_trigger))
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001409 record__read_auxtrace_snapshot(rec);
Wang Nan5f9cf592016-04-20 18:59:49 +00001410 if (trigger_is_error(&auxtrace_snapshot_trigger)) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001411 pr_err("AUX area tracing snapshot failed\n");
1412 err = -1;
1413 goto out_child;
1414 }
1415 }
1416
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001417 if (trigger_is_hit(&switch_output_trigger)) {
Wang Nan057374642016-07-14 08:34:43 +00001418 /*
1419 * If switch_output_trigger is hit, the data in
1420 * overwritable ring buffer should have been collected,
1421 * so bkw_mmap_state should be set to BKW_MMAP_EMPTY.
1422 *
1423 * If SIGUSR2 raise after or during record__mmap_read_all(),
1424 * record__mmap_read_all() didn't collect data from
1425 * overwritable ring buffer. Read again.
1426 */
1427 if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
1428 continue;
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001429 trigger_ready(&switch_output_trigger);
1430
Wang Nan057374642016-07-14 08:34:43 +00001431 /*
1432 * Reenable events in overwrite ring buffer after
1433 * record__mmap_read_all(): we should have collected
1434 * data from it.
1435 */
1436 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);
1437
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001438 if (!quiet)
1439 fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
1440 waking);
1441 waking = 0;
1442 fd = record__switch_output(rec, false);
1443 if (fd < 0) {
1444 pr_err("Failed to switch to new file\n");
1445 trigger_error(&switch_output_trigger);
1446 err = fd;
1447 goto out_child;
1448 }
Jiri Olsabfacbe32017-01-09 10:52:00 +01001449
1450 /* re-arm the alarm */
1451 if (rec->switch_output.time)
1452 alarm(rec->switch_output.time);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001453 }
1454
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001455 if (hits == rec->samples) {
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001456 if (done || draining)
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001457 break;
Arnaldo Carvalho de Melof66a8892014-08-18 17:25:59 -03001458 err = perf_evlist__poll(rec->evlist, -1);
Jiri Olsaa5151142014-06-02 13:44:23 -04001459 /*
1460 * Propagate error, only if there's any. Ignore positive
1461 * number of returned events and interrupt error.
1462 */
1463 if (err > 0 || (err < 0 && errno == EINTR))
Namhyung Kim45604712014-05-12 09:47:24 +09001464 err = 0;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001465 waking++;
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001466
1467 if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
1468 draining = true;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001469 }
1470
Jiri Olsa774cb492012-11-12 18:34:01 +01001471 /*
1472 * When perf is starting the traced process, at the end events
1473 * die with the process and we wait for that. Thus no need to
1474 * disable events in this case.
1475 */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001476 if (done && !disabled && !target__none(&opts->target)) {
Wang Nan5f9cf592016-04-20 18:59:49 +00001477 trigger_off(&auxtrace_snapshot_trigger);
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001478 perf_evlist__disable(rec->evlist);
Jiri Olsa27119262012-11-12 18:34:02 +01001479 disabled = true;
1480 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001481 }
Wang Nan5f9cf592016-04-20 18:59:49 +00001482 trigger_off(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001483 trigger_off(&switch_output_trigger);
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001484
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001485 if (forks && workload_exec_errno) {
Masami Hiramatsu35550da2014-08-14 02:22:43 +00001486 char msg[STRERR_BUFSIZE];
Arnaldo Carvalho de Meloc8b5f2c2016-07-06 11:56:20 -03001487 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001488 pr_err("Workload failed: %s\n", emsg);
1489 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001490 goto out_child;
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001491 }
1492
Namhyung Kime3d59112015-01-29 17:06:44 +09001493 if (!quiet)
Namhyung Kim45604712014-05-12 09:47:24 +09001494 fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02001495
Wang Nan4ea648a2016-07-14 08:34:47 +00001496 if (target__none(&rec->opts.target))
1497 record__synthesize_workload(rec, true);
1498
Namhyung Kim45604712014-05-12 09:47:24 +09001499out_child:
Alexey Budankov470530b2019-03-18 20:40:26 +03001500 record__mmap_read_all(rec, true);
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001501 record__aio_mmap_read_sync(rec);
1502
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001503 if (rec->session->bytes_transferred && rec->session->bytes_compressed) {
1504 ratio = (float)rec->session->bytes_transferred/(float)rec->session->bytes_compressed;
1505 session->header.env.comp_ratio = ratio + 0.5;
1506 }
1507
Namhyung Kim45604712014-05-12 09:47:24 +09001508 if (forks) {
1509 int exit_status;
Ingo Molnaraddc2782009-06-02 23:43:11 +02001510
Namhyung Kim45604712014-05-12 09:47:24 +09001511 if (!child_finished)
1512 kill(rec->evlist->workload.pid, SIGTERM);
1513
1514 wait(&exit_status);
1515
1516 if (err < 0)
1517 status = err;
1518 else if (WIFEXITED(exit_status))
1519 status = WEXITSTATUS(exit_status);
1520 else if (WIFSIGNALED(exit_status))
1521 signr = WTERMSIG(exit_status);
1522 } else
1523 status = err;
1524
Wang Nan4ea648a2016-07-14 08:34:47 +00001525 record__synthesize(rec, true);
Namhyung Kime3d59112015-01-29 17:06:44 +09001526 /* this will be recalculated during process_buildids() */
1527 rec->samples = 0;
1528
Wang Nanecfd7a92016-04-13 08:21:07 +00001529 if (!err) {
1530 if (!rec->timestamp_filename) {
1531 record__finish_output(rec);
1532 } else {
1533 fd = record__switch_output(rec, true);
1534 if (fd < 0) {
1535 status = fd;
1536 goto out_delete_session;
1537 }
1538 }
1539 }
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001540
Wang Nana0748652016-11-26 07:03:28 +00001541 perf_hooks__invoke_record_end();
1542
Namhyung Kime3d59112015-01-29 17:06:44 +09001543 if (!err && !quiet) {
1544 char samples[128];
Wang Nanecfd7a92016-04-13 08:21:07 +00001545 const char *postfix = rec->timestamp_filename ?
1546 ".<timestamp>" : "";
Namhyung Kime3d59112015-01-29 17:06:44 +09001547
Adrian Hunteref149c22015-04-09 18:53:45 +03001548 if (rec->samples && !rec->opts.full_auxtrace)
Namhyung Kime3d59112015-01-29 17:06:44 +09001549 scnprintf(samples, sizeof(samples),
1550 " (%" PRIu64 " samples)", rec->samples);
1551 else
1552 samples[0] = '\0';
1553
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001554 fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s",
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001555 perf_data__size(data) / 1024.0 / 1024.0,
Jiri Olsa2d4f2792019-02-21 10:41:30 +01001556 data->path, postfix, samples);
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001557 if (ratio) {
1558 fprintf(stderr, ", compressed (original %.3f MB, ratio is %.3f)",
1559 rec->session->bytes_transferred / 1024.0 / 1024.0,
1560 ratio);
1561 }
1562 fprintf(stderr, " ]\n");
Namhyung Kime3d59112015-01-29 17:06:44 +09001563 }
1564
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001565out_delete_session:
1566 perf_session__delete(session);
Song Liu657ee552019-03-11 22:30:50 -07001567
1568 if (!opts->no_bpf_event)
1569 perf_evlist__stop_sb_thread(sb_evlist);
Namhyung Kim45604712014-05-12 09:47:24 +09001570 return status;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001571}
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001572
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001573static void callchain_debug(struct callchain_param *callchain)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001574{
Kan Liangaad2b212015-01-05 13:23:04 -05001575 static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
Jiri Olsaa601fdf2014-02-03 12:44:43 +01001576
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001577 pr_debug("callchain: type %s\n", str[callchain->record_mode]);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001578
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001579 if (callchain->record_mode == CALLCHAIN_DWARF)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001580 pr_debug("callchain: stack dump size %d\n",
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001581 callchain->dump_size);
1582}
1583
1584int record_opts__parse_callchain(struct record_opts *record,
1585 struct callchain_param *callchain,
1586 const char *arg, bool unset)
1587{
1588 int ret;
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001589 callchain->enabled = !unset;
1590
1591 /* --no-call-graph */
1592 if (unset) {
1593 callchain->record_mode = CALLCHAIN_NONE;
1594 pr_debug("callchain: disabled\n");
1595 return 0;
1596 }
1597
1598 ret = parse_callchain_record_opt(arg, callchain);
1599 if (!ret) {
1600 /* Enable data address sampling for DWARF unwind. */
1601 if (callchain->record_mode == CALLCHAIN_DWARF)
1602 record->sample_address = true;
1603 callchain_debug(callchain);
1604 }
1605
1606 return ret;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001607}
1608
Kan Liangc421e802015-07-29 05:42:12 -04001609int record_parse_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001610 const char *arg,
1611 int unset)
1612{
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001613 return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
Jiri Olsa26d33022012-08-07 15:20:47 +02001614}
1615
Kan Liangc421e802015-07-29 05:42:12 -04001616int record_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001617 const char *arg __maybe_unused,
1618 int unset __maybe_unused)
1619{
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001620 struct callchain_param *callchain = opt->value;
Kan Liangc421e802015-07-29 05:42:12 -04001621
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001622 callchain->enabled = true;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001623
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001624 if (callchain->record_mode == CALLCHAIN_NONE)
1625 callchain->record_mode = CALLCHAIN_FP;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001626
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001627 callchain_debug(callchain);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001628 return 0;
1629}
1630
Jiri Olsaeb853e82014-02-03 12:44:42 +01001631static int perf_record_config(const char *var, const char *value, void *cb)
1632{
Namhyung Kim7a29c082015-12-15 10:49:56 +09001633 struct record *rec = cb;
1634
1635 if (!strcmp(var, "record.build-id")) {
1636 if (!strcmp(value, "cache"))
1637 rec->no_buildid_cache = false;
1638 else if (!strcmp(value, "no-cache"))
1639 rec->no_buildid_cache = true;
1640 else if (!strcmp(value, "skip"))
1641 rec->no_buildid = true;
1642 else
1643 return -1;
1644 return 0;
1645 }
Yisheng Xiecff17202018-03-12 19:25:57 +08001646 if (!strcmp(var, "record.call-graph")) {
1647 var = "call-graph.record-mode";
1648 return perf_default_config(var, value, cb);
1649 }
Alexey Budankov93f20c02018-11-06 12:07:19 +03001650#ifdef HAVE_AIO_SUPPORT
1651 if (!strcmp(var, "record.aio")) {
1652 rec->opts.nr_cblocks = strtol(value, NULL, 0);
1653 if (!rec->opts.nr_cblocks)
1654 rec->opts.nr_cblocks = nr_cblocks_default;
1655 }
1656#endif
Jiri Olsaeb853e82014-02-03 12:44:42 +01001657
Yisheng Xiecff17202018-03-12 19:25:57 +08001658 return 0;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001659}
1660
/* Mapping of a user-visible clock name to its clockid_t value. */
struct clockid_map {
	const char *name;
	int clockid;
};

/* Table-entry helpers for clockids[] below. */
#define CLOCKID_MAP(n, c)	\
	{ .name = n, .clockid = (c), }

#define CLOCKID_END	{ .name = NULL, }


/*
 * Add the missing ones, we need to build on many distros...
 * (Older libc headers may lack these CLOCK_* constants even though the
 * running kernel supports them, so define the numeric values directly.)
 */
#ifndef CLOCK_MONOTONIC_RAW
#define CLOCK_MONOTONIC_RAW 4
#endif
#ifndef CLOCK_BOOTTIME
#define CLOCK_BOOTTIME 7
#endif
#ifndef CLOCK_TAI
#define CLOCK_TAI 11
#endif

/*
 * Name -> clockid table searched (case-insensitively) by parse_clockid();
 * terminated by CLOCKID_END.
 */
static const struct clockid_map clockids[] = {
	/* available for all events, NMI safe */
	CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
	CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),

	/* available for some events */
	CLOCKID_MAP("realtime", CLOCK_REALTIME),
	CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
	CLOCKID_MAP("tai", CLOCK_TAI),

	/* available for the lazy (short aliases of the above) */
	CLOCKID_MAP("mono", CLOCK_MONOTONIC),
	CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
	CLOCKID_MAP("real", CLOCK_REALTIME),
	CLOCKID_MAP("boot", CLOCK_BOOTTIME),

	CLOCKID_END,
};
1703
Alexey Budankovcf790512018-10-09 17:36:24 +03001704static int get_clockid_res(clockid_t clk_id, u64 *res_ns)
1705{
1706 struct timespec res;
1707
1708 *res_ns = 0;
1709 if (!clock_getres(clk_id, &res))
1710 *res_ns = res.tv_nsec + res.tv_sec * NSEC_PER_SEC;
1711 else
1712 pr_warning("WARNING: Failed to determine specified clock resolution.\n");
1713
1714 return 0;
1715}
1716
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001717static int parse_clockid(const struct option *opt, const char *str, int unset)
1718{
1719 struct record_opts *opts = (struct record_opts *)opt->value;
1720 const struct clockid_map *cm;
1721 const char *ostr = str;
1722
1723 if (unset) {
1724 opts->use_clockid = 0;
1725 return 0;
1726 }
1727
1728 /* no arg passed */
1729 if (!str)
1730 return 0;
1731
1732 /* no setting it twice */
1733 if (opts->use_clockid)
1734 return -1;
1735
1736 opts->use_clockid = true;
1737
1738 /* if its a number, we're done */
1739 if (sscanf(str, "%d", &opts->clockid) == 1)
Alexey Budankovcf790512018-10-09 17:36:24 +03001740 return get_clockid_res(opts->clockid, &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001741
1742 /* allow a "CLOCK_" prefix to the name */
1743 if (!strncasecmp(str, "CLOCK_", 6))
1744 str += 6;
1745
1746 for (cm = clockids; cm->name; cm++) {
1747 if (!strcasecmp(str, cm->name)) {
1748 opts->clockid = cm->clockid;
Alexey Budankovcf790512018-10-09 17:36:24 +03001749 return get_clockid_res(opts->clockid,
1750 &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001751 }
1752 }
1753
1754 opts->use_clockid = false;
1755 ui__warning("unknown clockid %s, check man page\n", ostr);
1756 return -1;
1757}
1758
Alexey Budankovf4fe11b2019-01-22 20:52:03 +03001759static int record__parse_affinity(const struct option *opt, const char *str, int unset)
1760{
1761 struct record_opts *opts = (struct record_opts *)opt->value;
1762
1763 if (unset || !str)
1764 return 0;
1765
1766 if (!strcasecmp(str, "node"))
1767 opts->affinity = PERF_AFFINITY_NODE;
1768 else if (!strcasecmp(str, "cpu"))
1769 opts->affinity = PERF_AFFINITY_CPU;
1770
1771 return 0;
1772}
1773
Adrian Huntere9db1312015-04-09 18:53:46 +03001774static int record__parse_mmap_pages(const struct option *opt,
1775 const char *str,
1776 int unset __maybe_unused)
1777{
1778 struct record_opts *opts = opt->value;
1779 char *s, *p;
1780 unsigned int mmap_pages;
1781 int ret;
1782
1783 if (!str)
1784 return -EINVAL;
1785
1786 s = strdup(str);
1787 if (!s)
1788 return -ENOMEM;
1789
1790 p = strchr(s, ',');
1791 if (p)
1792 *p = '\0';
1793
1794 if (*s) {
1795 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
1796 if (ret)
1797 goto out_free;
1798 opts->mmap_pages = mmap_pages;
1799 }
1800
1801 if (!p) {
1802 ret = 0;
1803 goto out_free;
1804 }
1805
1806 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
1807 if (ret)
1808 goto out_free;
1809
1810 opts->auxtrace_mmap_pages = mmap_pages;
1811
1812out_free:
1813 free(s);
1814 return ret;
1815}
1816
Jiri Olsa0c582442017-01-09 10:51:59 +01001817static void switch_output_size_warn(struct record *rec)
1818{
1819 u64 wakeup_size = perf_evlist__mmap_size(rec->opts.mmap_pages);
1820 struct switch_output *s = &rec->switch_output;
1821
1822 wakeup_size /= 2;
1823
1824 if (s->size < wakeup_size) {
1825 char buf[100];
1826
1827 unit_number__scnprintf(buf, sizeof(buf), wakeup_size);
1828 pr_warning("WARNING: switch-output data size lower than "
1829 "wakeup kernel buffer size (%s) "
1830 "expect bigger perf.data sizes\n", buf);
1831 }
1832}
1833
/*
 * Configure --switch-output from its string argument.  Three forms are
 * accepted, tried in order: the literal "signal" (rotate output on
 * SIGUSR2), a size with a B/K/M/G suffix (rotate when the output grows
 * past that size) and a time with an s/m/h/d suffix (rotate
 * periodically).  Returns 0 on success or when the option was not
 * given, -1 when the argument matches none of the forms.
 */
static int switch_output_setup(struct record *rec)
{
	struct switch_output *s = &rec->switch_output;
	static struct parse_tag tags_size[] = {
		{ .tag = 'B', .mult = 1 },
		{ .tag = 'K', .mult = 1 << 10 },
		{ .tag = 'M', .mult = 1 << 20 },
		{ .tag = 'G', .mult = 1 << 30 },
		{ .tag = 0 },
	};
	static struct parse_tag tags_time[] = {
		{ .tag = 's', .mult = 1 },
		{ .tag = 'm', .mult = 60 },
		{ .tag = 'h', .mult = 60*60 },
		{ .tag = 'd', .mult = 60*60*24 },
		{ .tag = 0 },
	};
	unsigned long val;

	/* --switch-output not given on the command line. */
	if (!s->set)
		return 0;

	if (!strcmp(s->str, "signal")) {
		s->signal = true;
		pr_debug("switch-output with SIGUSR2 signal\n");
		goto enabled;
	}

	/* Try a size threshold next, e.g. "100M". */
	val = parse_tag_value(s->str, tags_size);
	if (val != (unsigned long) -1) {
		s->size = val;
		pr_debug("switch-output with %s size threshold\n", s->str);
		goto enabled;
	}

	/* Finally a time threshold, e.g. "30s". */
	val = parse_tag_value(s->str, tags_time);
	if (val != (unsigned long) -1) {
		s->time = val;
		pr_debug("switch-output with %s time threshold (%lu seconds)\n",
			 s->str, s->time);
		goto enabled;
	}

	return -1;

enabled:
	/* Rotated output files need unique, timestamped names. */
	rec->timestamp_filename = true;
	s->enabled = true;

	if (s->size && !rec->opts.no_buffering)
		switch_output_size_warn(rec);

	return 0;
}
1888
/* Usage strings shown by -h/--help; exported via record_usage for reuse. */
static const char * const __record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};
const char * const *record_usage = __record_usage;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001895
/*
 * XXX Ideally would be local to cmd_record() and passed to a record__new
 * because we need to have access to it in record__exit, that is called
 * after cmd_record() exits, but since record_options need to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't touch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
/* Global record state: default option values and event-processing tool. */
static struct record record = {
	.opts = {
		.sample_time	     = true,
		.mmap_pages	     = UINT_MAX,	/* UINT_MAX/ULLONG_MAX mean "not set by user" */
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,		/* default sampling frequency (Hz) */
		.target		     = {
			.uses_mmap   = true,
			.default_per_cpu = true,
		},
		.mmap_flush          = MMAP_FLUSH_DEFAULT,
	},
	/* Callbacks used when re-processing events for build-ids etc. */
	.tool = {
		.sample		= process_sample_event,
		.fork		= perf_event__process_fork,
		.exit		= perf_event__process_exit,
		.comm		= perf_event__process_comm,
		.namespaces	= perf_event__process_namespaces,
		.mmap		= perf_event__process_mmap,
		.mmap2		= perf_event__process_mmap2,
		.ordered_events	= true,
	},
};
Frederic Weisbecker7865e812010-04-14 19:42:07 +02001930
/* Help text for --call-graph, shared with other builtins. */
const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
	"\n\t\t\t\tDefault: fp";

/* --dry-run: parse options, then exit without recording. */
static bool dry_run;
1935
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001936/*
1937 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
1938 * with it and switch to use the library functions in perf_evlist that came
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03001939 * from builtin-record.c, i.e. use record_opts,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001940 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
1941 * using pipes, etc.
1942 */
Jiri Olsaefd21302017-01-03 09:19:55 +01001943static struct option __record_options[] = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001944 OPT_CALLBACK('e', "event", &record.evlist, "event",
Thomas Gleixner86847b62009-06-06 12:24:17 +02001945 "event selector. use 'perf list' to list available events",
Jiri Olsaf120f9d2011-07-14 11:25:32 +02001946 parse_events_option),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001947 OPT_CALLBACK(0, "filter", &record.evlist, "filter",
Li Zefanc171b552009-10-15 11:22:07 +08001948 "event filter", parse_filter),
Wang Nan4ba1faa2015-07-10 07:36:10 +00001949 OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
1950 NULL, "don't record events from perf itself",
1951 exclude_perf),
Namhyung Kimbea03402012-04-26 14:15:15 +09001952 OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03001953 "record events on existing process id"),
Namhyung Kimbea03402012-04-26 14:15:15 +09001954 OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03001955 "record events on existing thread id"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001956 OPT_INTEGER('r', "realtime", &record.realtime_prio,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001957 "collect data with this RT SCHED_FIFO priority"),
Arnaldo Carvalho de Melo509051e2014-01-14 17:52:14 -03001958 OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
Kirill Smelkovacac03f2011-01-12 17:59:36 +03001959 "collect data without buffering"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001960 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
Frederic Weisbeckerdaac07b2009-08-13 10:27:19 +02001961 "collect raw sample records from all opened counters"),
Namhyung Kimbea03402012-04-26 14:15:15 +09001962 OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001963 "system-wide collection from all CPUs"),
Namhyung Kimbea03402012-04-26 14:15:15 +09001964 OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
Stephane Eranianc45c6ea2010-05-28 12:00:01 +02001965 "list of cpus to monitor"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001966 OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
Jiri Olsa2d4f2792019-02-21 10:41:30 +01001967 OPT_STRING('o', "output", &record.data.path, "file",
Ingo Molnarabaff322009-06-02 22:59:57 +02001968 "output file name"),
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02001969 OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
1970 &record.opts.no_inherit_set,
1971 "child tasks do not inherit counters"),
Wang Nan4ea648a2016-07-14 08:34:47 +00001972 OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
1973 "synthesize non-sample events at the end of output"),
Wang Nan626a6b72016-07-14 08:34:45 +00001974 OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
Song Liu71184c62019-03-11 22:30:37 -07001975 OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "record bpf events"),
Arnaldo Carvalho de Melob09c2362018-03-01 14:52:50 -03001976 OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
1977 "Fail if the specified frequency can't be used"),
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03001978 OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
1979 "profile at this frequency",
1980 record__parse_freq),
Adrian Huntere9db1312015-04-09 18:53:46 +03001981 OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
1982 "number of mmap data pages and AUX area tracing mmap pages",
1983 record__parse_mmap_pages),
Alexey Budankov470530b2019-03-18 20:40:26 +03001984 OPT_CALLBACK(0, "mmap-flush", &record.opts, "number",
1985 "Minimal number of bytes that is extracted from mmap data pages (default: 1)",
1986 record__mmap_flush_parse),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001987 OPT_BOOLEAN(0, "group", &record.opts.group,
Lin Ming43bece72011-08-17 18:42:07 +08001988 "put the counters into a counter group"),
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001989 OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001990 NULL, "enables call-graph recording" ,
1991 &record_callchain_opt),
1992 OPT_CALLBACK(0, "call-graph", &record.opts,
Namhyung Kim76a26542015-10-22 23:28:32 +09001993 "record_mode[,record_size]", record_callchain_help,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001994 &record_parse_callchain_opt),
Ian Munsiec0555642010-04-13 18:37:33 +10001995 OPT_INCR('v', "verbose", &verbose,
Ingo Molnar3da297a2009-06-07 17:39:02 +02001996 "be more verbose (show counter open errors, etc)"),
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02001997 OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001998 OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001999 "per thread counts"),
Peter Zijlstra56100322015-06-10 16:48:50 +02002000 OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
Kan Liang3b0a5da2017-08-29 13:11:08 -04002001 OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
2002 "Record the sample physical addresses"),
Jiri Olsab6f35ed2016-08-01 20:02:35 +02002003 OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
Adrian Hunter3abebc52015-07-06 14:51:01 +03002004 OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
2005 &record.opts.sample_time_set,
2006 "Record the sample timestamps"),
Jiri Olsaf290aa12018-02-01 09:38:11 +01002007 OPT_BOOLEAN_SET('P', "period", &record.opts.period, &record.opts.period_set,
2008 "Record the sample period"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002009 OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02002010 "don't sample"),
Wang Nand2db9a92016-01-25 09:56:19 +00002011 OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
2012 &record.no_buildid_cache_set,
2013 "do not update the buildid cache"),
2014 OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
2015 &record.no_buildid_set,
2016 "do not collect buildids in perf.data"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002017 OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
Stephane Eranian023695d2011-02-14 11:20:01 +02002018 "monitor event in cgroup name only",
2019 parse_cgroups),
Arnaldo Carvalho de Meloa6205a32014-01-14 17:58:12 -03002020 OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
Andi Kleen6619a532014-01-11 13:38:27 -08002021 "ms to wait before starting measurement after program start"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002022 OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
2023 "user to profile"),
Stephane Eraniana5aabda2012-03-08 23:47:45 +01002024
2025 OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
2026 "branch any", "sample any taken branches",
2027 parse_branch_stack),
2028
2029 OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
2030 "branch filter mask", "branch stack filter modes",
Roberto Agostino Vitillobdfebd82012-02-09 23:21:02 +01002031 parse_branch_stack),
Andi Kleen05484292013-01-24 16:10:29 +01002032 OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
2033 "sample by weight (on special events only)"),
Andi Kleen475eeab2013-09-20 07:40:43 -07002034 OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
2035 "sample transaction flags (special events only)"),
Adrian Hunter3aa59392013-11-15 15:52:29 +02002036 OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
2037 "use per-thread mmaps"),
Stephane Eranianbcc84ec2015-08-31 18:41:12 +02002038 OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
2039 "sample selected machine registers on interrupt,"
Arnaldo Carvalho de Melo8e5bc762019-05-13 15:55:01 -03002040 " use '-I?' to list register names", parse_regs),
Andi Kleen84c41742017-09-05 10:00:28 -07002041 OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
2042 "sample selected machine registers on interrupt,"
Arnaldo Carvalho de Melo8e5bc762019-05-13 15:55:01 -03002043 " use '-I?' to list register names", parse_regs),
Andi Kleen85c273d2015-02-24 15:13:40 -08002044 OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
2045 "Record running/enabled time of read (:S) events"),
Peter Zijlstra814c8c32015-03-31 00:19:31 +02002046 OPT_CALLBACK('k', "clockid", &record.opts,
2047 "clockid", "clockid to use for events, see clock_gettime()",
2048 parse_clockid),
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002049 OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
2050 "opts", "AUX area tracing Snapshot Mode", ""),
Mark Drayton3fcb10e2018-12-04 12:34:20 -08002051 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
Kan Liang9d9cad72015-06-17 09:51:11 -04002052 "per thread proc mmap processing timeout in ms"),
Hari Bathinif3b36142017-03-08 02:11:43 +05302053 OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
2054 "Record namespaces events"),
Adrian Hunterb757bb02015-07-21 12:44:04 +03002055 OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
2056 "Record context switch events"),
Jiri Olsa85723882016-02-15 09:34:31 +01002057 OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
2058 "Configure all used events to run in kernel space.",
2059 PARSE_OPT_EXCLUSIVE),
2060 OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
2061 "Configure all used events to run in user space.",
2062 PARSE_OPT_EXCLUSIVE),
Wang Nan71dc23262015-10-14 12:41:19 +00002063 OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
2064 "clang binary to use for compiling BPF scriptlets"),
2065 OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
2066 "options passed to clang when compiling BPF scriptlets"),
He Kuang7efe0e02015-12-14 10:39:23 +00002067 OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
2068 "file", "vmlinux pathname"),
Namhyung Kim61566812016-01-11 22:37:09 +09002069 OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
2070 "Record build-id of all DSOs regardless of hits"),
Wang Nanecfd7a92016-04-13 08:21:07 +00002071 OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
2072 "append timestamp to output filename"),
Jin Yao68588ba2017-12-08 21:13:42 +08002073 OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
2074 "Record timestamp boundary (time of first/last samples)"),
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002075 OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
Andi Kleenc38dab72019-03-14 15:49:56 -07002076 &record.switch_output.set, "signal or size[BKMG] or time[smhd]",
2077 "Switch output when receiving SIGUSR2 (signal) or cross a size or time threshold",
Jiri Olsadc0c6122017-01-09 10:51:58 +01002078 "signal"),
Andi Kleen03724b22019-03-14 15:49:55 -07002079 OPT_INTEGER(0, "switch-max-files", &record.switch_output.num_files,
2080 "Limit number of switch output generated files"),
Wang Nan0aab2132016-06-16 08:02:41 +00002081 OPT_BOOLEAN(0, "dry-run", &dry_run,
2082 "Parse options then exit"),
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002083#ifdef HAVE_AIO_SUPPORT
Alexey Budankov93f20c02018-11-06 12:07:19 +03002084 OPT_CALLBACK_OPTARG(0, "aio", &record.opts,
2085 &nr_cblocks_default, "n", "Use <n> control blocks in asynchronous trace writing mode (default: 1, max: 4)",
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002086 record__aio_parse),
2087#endif
Alexey Budankovf4fe11b2019-01-22 20:52:03 +03002088 OPT_CALLBACK(0, "affinity", &record.opts, "node|cpu",
2089 "Set affinity mask of trace reading thread to NUMA node cpu mask or cpu of processed mmap buffer",
2090 record__parse_affinity),
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002091 OPT_END()
2092};
2093
/*
 * Public handle to the option table above; referenced by the
 * set_nobuild() macros in cmd_record() to disable options whose
 * build-time prerequisites (libbpf, DWARF, ...) are missing.
 */
struct option *record_options = __record_options;
2095
Arnaldo Carvalho de Melob0ad8ea2017-03-27 11:47:20 -03002096int cmd_record(int argc, const char **argv)
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002097{
Adrian Hunteref149c22015-04-09 18:53:45 +03002098 int err;
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03002099 struct record *rec = &record;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002100 char errbuf[BUFSIZ];
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002101
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03002102 setlocale(LC_ALL, "");
2103
Wang Nan48e1cab2015-12-14 10:39:22 +00002104#ifndef HAVE_LIBBPF_SUPPORT
2105# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
2106 set_nobuild('\0', "clang-path", true);
2107 set_nobuild('\0', "clang-opt", true);
2108# undef set_nobuild
2109#endif
2110
He Kuang7efe0e02015-12-14 10:39:23 +00002111#ifndef HAVE_BPF_PROLOGUE
2112# if !defined (HAVE_DWARF_SUPPORT)
2113# define REASON "NO_DWARF=1"
2114# elif !defined (HAVE_LIBBPF_SUPPORT)
2115# define REASON "NO_LIBBPF=1"
2116# else
2117# define REASON "this architecture doesn't support BPF prologue"
2118# endif
2119# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
2120 set_nobuild('\0', "vmlinux", true);
2121# undef set_nobuild
2122# undef REASON
2123#endif
2124
Alexey Budankov9d2ed642019-01-22 20:47:43 +03002125 CPU_ZERO(&rec->affinity_mask);
2126 rec->opts.affinity = PERF_AFFINITY_SYS;
2127
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03002128 rec->evlist = perf_evlist__new();
2129 if (rec->evlist == NULL)
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -02002130 return -ENOMEM;
2131
Arnaldo Carvalho de Meloecc4c562017-01-24 13:44:10 -03002132 err = perf_config(perf_record_config, rec);
2133 if (err)
2134 return err;
Jiri Olsaeb853e82014-02-03 12:44:42 +01002135
Tom Zanussibca647a2010-11-10 08:11:30 -06002136 argc = parse_options(argc, argv, record_options, record_usage,
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02002137 PARSE_OPT_STOP_AT_NON_OPTION);
Namhyung Kim68ba3232017-02-17 17:17:42 +09002138 if (quiet)
2139 perf_quiet_option();
Jiri Olsa483635a2017-02-17 18:00:18 +01002140
2141 /* Make system wide (-a) the default target. */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002142 if (!argc && target__none(&rec->opts.target))
Jiri Olsa483635a2017-02-17 18:00:18 +01002143 rec->opts.target.system_wide = true;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002144
Namhyung Kimbea03402012-04-26 14:15:15 +09002145 if (nr_cgroups && !rec->opts.target.system_wide) {
Namhyung Kimc7118362015-10-25 00:49:27 +09002146 usage_with_options_msg(record_usage, record_options,
2147 "cgroup monitoring only available in system-wide mode");
2148
Stephane Eranian023695d2011-02-14 11:20:01 +02002149 }
Adrian Hunterb757bb02015-07-21 12:44:04 +03002150 if (rec->opts.record_switch_events &&
2151 !perf_can_record_switch_events()) {
Namhyung Kimc7118362015-10-25 00:49:27 +09002152 ui__error("kernel does not support recording context switch events\n");
2153 parse_options_usage(record_usage, record_options, "switch-events", 0);
2154 return -EINVAL;
Adrian Hunterb757bb02015-07-21 12:44:04 +03002155 }
Stephane Eranian023695d2011-02-14 11:20:01 +02002156
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002157 if (switch_output_setup(rec)) {
2158 parse_options_usage(record_usage, record_options, "switch-output", 0);
2159 return -EINVAL;
2160 }
2161
Jiri Olsabfacbe32017-01-09 10:52:00 +01002162 if (rec->switch_output.time) {
2163 signal(SIGALRM, alarm_sig_handler);
2164 alarm(rec->switch_output.time);
2165 }
2166
Andi Kleen03724b22019-03-14 15:49:55 -07002167 if (rec->switch_output.num_files) {
2168 rec->switch_output.filenames = calloc(sizeof(char *),
2169 rec->switch_output.num_files);
2170 if (!rec->switch_output.filenames)
2171 return -EINVAL;
2172 }
2173
Adrian Hunter1b36c032016-09-23 17:38:39 +03002174 /*
2175 * Allow aliases to facilitate the lookup of symbols for address
2176 * filters. Refer to auxtrace_parse_filters().
2177 */
2178 symbol_conf.allow_aliases = true;
2179
2180 symbol__init(NULL);
2181
Adrian Hunter4b5ea3b2018-03-06 11:13:12 +02002182 err = record__auxtrace_init(rec);
Adrian Hunter1b36c032016-09-23 17:38:39 +03002183 if (err)
2184 goto out;
2185
Wang Nan0aab2132016-06-16 08:02:41 +00002186 if (dry_run)
Adrian Hunter5c01ad602016-09-23 17:38:37 +03002187 goto out;
Wang Nan0aab2132016-06-16 08:02:41 +00002188
Wang Nand7888572016-04-08 15:07:24 +00002189 err = bpf__setup_stdout(rec->evlist);
2190 if (err) {
2191 bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
2192 pr_err("ERROR: Setup BPF stdout failed: %s\n",
2193 errbuf);
Adrian Hunter5c01ad602016-09-23 17:38:37 +03002194 goto out;
Wang Nand7888572016-04-08 15:07:24 +00002195 }
2196
Adrian Hunteref149c22015-04-09 18:53:45 +03002197 err = -ENOMEM;
2198
Arnaldo Carvalho de Melo6c443952017-11-14 11:03:19 -03002199 if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(rec->evlist))
Arnaldo Carvalho de Melo646aaea2011-05-27 11:00:41 -03002200 pr_warning(
2201"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
2202"check /proc/sys/kernel/kptr_restrict.\n\n"
2203"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
2204"file is not found in the buildid cache or in the vmlinux path.\n\n"
2205"Samples in kernel modules won't be resolved at all.\n\n"
2206"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
2207"even with a suitable vmlinux or kallsyms file.\n\n");
Arnaldo Carvalho de Meloec80fde2011-05-26 09:53:51 -03002208
Wang Nan0c1d46a2016-04-20 18:59:52 +00002209 if (rec->no_buildid_cache || rec->no_buildid) {
Stephane Eraniana1ac1d32010-06-17 11:39:01 +02002210 disable_buildid_cache();
Jiri Olsadc0c6122017-01-09 10:51:58 +01002211 } else if (rec->switch_output.enabled) {
Wang Nan0c1d46a2016-04-20 18:59:52 +00002212 /*
2213 * In 'perf record --switch-output', disable buildid
2214 * generation by default to reduce data file switching
2215 * overhead. Still generate buildid if they are required
2216 * explicitly using
2217 *
Jiri Olsa60437ac2017-01-03 09:19:56 +01002218 * perf record --switch-output --no-no-buildid \
Wang Nan0c1d46a2016-04-20 18:59:52 +00002219 * --no-no-buildid-cache
2220 *
2221 * Following code equals to:
2222 *
2223 * if ((rec->no_buildid || !rec->no_buildid_set) &&
2224 * (rec->no_buildid_cache || !rec->no_buildid_cache_set))
2225 * disable_buildid_cache();
2226 */
2227 bool disable = true;
2228
2229 if (rec->no_buildid_set && !rec->no_buildid)
2230 disable = false;
2231 if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
2232 disable = false;
2233 if (disable) {
2234 rec->no_buildid = true;
2235 rec->no_buildid_cache = true;
2236 disable_buildid_cache();
2237 }
2238 }
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02002239
Wang Nan4ea648a2016-07-14 08:34:47 +00002240 if (record.opts.overwrite)
2241 record.opts.tail_synthesize = true;
2242
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03002243 if (rec->evlist->nr_entries == 0 &&
Arnaldo Carvalho de Melo4b4cd502017-07-03 13:26:32 -03002244 __perf_evlist__add_default(rec->evlist, !record.opts.no_samples) < 0) {
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02002245 pr_err("Not enough memory for event selector list\n");
Adrian Hunter394c01e2016-09-23 17:38:36 +03002246 goto out;
Peter Zijlstrabbd36e52009-06-11 23:11:50 +02002247 }
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002248
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02002249 if (rec->opts.target.tid && !rec->opts.no_inherit_set)
2250 rec->opts.no_inherit = true;
2251
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002252 err = target__validate(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002253 if (err) {
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002254 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Jiri Olsac3dec272018-02-06 19:17:58 +01002255 ui__warning("%s\n", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002256 }
Namhyung Kim4bd0f2d2012-04-26 14:15:18 +09002257
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002258 err = target__parse_uid(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002259 if (err) {
2260 int saved_errno = errno;
2261
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002262 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Namhyung Kim3780f482012-05-29 13:22:57 +09002263 ui__error("%s", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002264
2265 err = -saved_errno;
Adrian Hunter394c01e2016-09-23 17:38:36 +03002266 goto out;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002267 }
Arnaldo Carvalho de Melo0d37aa32012-01-19 14:08:15 -02002268
Mengting Zhangca800062017-12-13 15:01:53 +08002269 /* Enable ignoring missing threads when -u/-p option is defined. */
2270 rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;
Jiri Olsa23dc4f12016-12-12 11:35:43 +01002271
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002272 err = -ENOMEM;
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03002273 if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
Arnaldo Carvalho de Melodd7927f2011-01-12 14:28:51 -02002274 usage_with_options(record_usage, record_options);
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02002275
Adrian Hunteref149c22015-04-09 18:53:45 +03002276 err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
2277 if (err)
Adrian Hunter394c01e2016-09-23 17:38:36 +03002278 goto out;
Adrian Hunteref149c22015-04-09 18:53:45 +03002279
Namhyung Kim61566812016-01-11 22:37:09 +09002280 /*
2281 * We take all buildids when the file contains
2282 * AUX area tracing data because we do not decode the
2283 * trace because it would take too long.
2284 */
2285 if (rec->opts.full_auxtrace)
2286 rec->buildid_all = true;
2287
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03002288 if (record_opts__config(&rec->opts)) {
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03002289 err = -EINVAL;
Adrian Hunter394c01e2016-09-23 17:38:36 +03002290 goto out;
Mike Galbraith7e4ff9e2009-10-12 07:56:03 +02002291 }
2292
Alexey Budankov93f20c02018-11-06 12:07:19 +03002293 if (rec->opts.nr_cblocks > nr_cblocks_max)
2294 rec->opts.nr_cblocks = nr_cblocks_max;
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002295 if (verbose > 0)
2296 pr_info("nr_cblocks: %d\n", rec->opts.nr_cblocks);
2297
Alexey Budankov9d2ed642019-01-22 20:47:43 +03002298 pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
Alexey Budankov470530b2019-03-18 20:40:26 +03002299 pr_debug("mmap flush: %d\n", rec->opts.mmap_flush);
Alexey Budankov9d2ed642019-01-22 20:47:43 +03002300
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002301 err = __cmd_record(&record, argc, argv);
Adrian Hunter394c01e2016-09-23 17:38:36 +03002302out:
Namhyung Kim45604712014-05-12 09:47:24 +09002303 perf_evlist__delete(rec->evlist);
Arnaldo Carvalho de Melod65a4582010-07-30 18:31:28 -03002304 symbol__exit();
Adrian Hunteref149c22015-04-09 18:53:45 +03002305 auxtrace_record__free(rec->itr);
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03002306 return err;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002307}
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002308
2309static void snapshot_sig_handler(int sig __maybe_unused)
2310{
Jiri Olsadc0c6122017-01-09 10:51:58 +01002311 struct record *rec = &record;
2312
Wang Nan5f9cf592016-04-20 18:59:49 +00002313 if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
2314 trigger_hit(&auxtrace_snapshot_trigger);
2315 auxtrace_record__snapshot_started = 1;
2316 if (auxtrace_record__snapshot_start(record.itr))
2317 trigger_error(&auxtrace_snapshot_trigger);
2318 }
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002319
Jiri Olsadc0c6122017-01-09 10:51:58 +01002320 if (switch_output_signal(rec))
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002321 trigger_hit(&switch_output_trigger);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002322}
Jiri Olsabfacbe32017-01-09 10:52:00 +01002323
2324static void alarm_sig_handler(int sig __maybe_unused)
2325{
2326 struct record *rec = &record;
2327
2328 if (switch_output_time(rec))
2329 trigger_hit(&switch_output_trigger);
2330}