blob: 386e665a166f26499932df2142f96207218466ce [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Ingo Molnarabaff322009-06-02 22:59:57 +02002/*
Ingo Molnarbf9e1872009-06-02 23:37:05 +02003 * builtin-record.c
4 *
5 * Builtin record command: Record the profile of a workload
6 * (or a CPU, or a PID) into the perf.data output file - for
7 * later analysis via perf report.
Ingo Molnarabaff322009-06-02 22:59:57 +02008 */
Ingo Molnar16f762a2009-05-27 09:10:38 +02009#include "builtin.h"
Ingo Molnarbf9e1872009-06-02 23:37:05 +020010
11#include "perf.h"
12
Arnaldo Carvalho de Melo6122e4e2010-02-03 16:52:05 -020013#include "util/build-id.h"
Thomas Gleixner6eda5832009-05-01 18:29:57 +020014#include "util/util.h"
Josh Poimboeuf4b6ab942015-12-15 09:39:39 -060015#include <subcmd/parse-options.h>
Ingo Molnar8ad8db32009-05-26 11:10:09 +020016#include "util/parse-events.h"
Taeung Song41840d22016-06-23 17:55:17 +090017#include "util/config.h"
Thomas Gleixner6eda5832009-05-01 18:29:57 +020018
Arnaldo Carvalho de Melo8f651ea2014-10-09 16:12:24 -030019#include "util/callchain.h"
Arnaldo Carvalho de Melof14d5702014-10-17 12:17:40 -030020#include "util/cgroup.h"
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020021#include "util/header.h"
Frederic Weisbecker66e274f2009-08-12 11:07:25 +020022#include "util/event.h"
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -020023#include "util/evlist.h"
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -020024#include "util/evsel.h"
Frederic Weisbecker8f288272009-08-16 22:05:48 +020025#include "util/debug.h"
Arnaldo Carvalho de Melo94c744b2009-12-11 21:24:02 -020026#include "util/session.h"
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -020027#include "util/tool.h"
Arnaldo Carvalho de Melo8d063672009-11-04 18:50:43 -020028#include "util/symbol.h"
Paul Mackerrasa12b51c2010-03-10 20:36:09 +110029#include "util/cpumap.h"
Arnaldo Carvalho de Melofd782602011-01-18 15:15:24 -020030#include "util/thread_map.h"
Jiri Olsaf5fc14122013-10-15 16:27:32 +020031#include "util/data.h"
Stephane Eranianbcc84ec2015-08-31 18:41:12 +020032#include "util/perf_regs.h"
Adrian Hunteref149c22015-04-09 18:53:45 +030033#include "util/auxtrace.h"
Adrian Hunter46bc29b2016-03-08 10:38:44 +020034#include "util/tsc.h"
Andi Kleenf00898f2015-05-27 10:51:51 -070035#include "util/parse-branch-options.h"
Stephane Eranianbcc84ec2015-08-31 18:41:12 +020036#include "util/parse-regs-options.h"
Wang Nan71dc23262015-10-14 12:41:19 +000037#include "util/llvm-utils.h"
Wang Nan8690a2a2016-02-22 09:10:32 +000038#include "util/bpf-loader.h"
Wang Nan5f9cf592016-04-20 18:59:49 +000039#include "util/trigger.h"
Wang Nana0748652016-11-26 07:03:28 +000040#include "util/perf-hooks.h"
Alexey Budankovf13de662019-01-22 20:50:57 +030041#include "util/cpu-set-sched.h"
Arnaldo Carvalho de Meloc5e40272017-04-19 16:12:39 -030042#include "util/time-utils.h"
Arnaldo Carvalho de Melo58db1d62017-04-19 16:05:56 -030043#include "util/units.h"
Song Liu7b612e22019-01-17 08:15:19 -080044#include "util/bpf-event.h"
Wang Nand8871ea2016-02-26 09:32:06 +000045#include "asm/bug.h"
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020046
Arnaldo Carvalho de Meloa43783a2017-04-18 10:46:11 -030047#include <errno.h>
Arnaldo Carvalho de Melofd20e812017-04-17 15:23:08 -030048#include <inttypes.h>
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -030049#include <locale.h>
Arnaldo Carvalho de Melo42087352017-04-19 19:06:30 -030050#include <poll.h>
Peter Zijlstra97124d5e2009-06-02 15:52:24 +020051#include <unistd.h>
Peter Zijlstrade9ac072009-04-08 15:01:31 +020052#include <sched.h>
Arnaldo Carvalho de Melo9607ad32017-04-19 15:49:18 -030053#include <signal.h>
Arnaldo Carvalho de Meloa41794c2010-05-18 18:29:23 -030054#include <sys/mman.h>
Arnaldo Carvalho de Melo42087352017-04-19 19:06:30 -030055#include <sys/wait.h>
Arnaldo Carvalho de Melo0693e682016-08-08 15:05:46 -030056#include <linux/time64.h>
Bernhard Rosenkraenzer78da39f2012-10-08 09:43:26 +030057
Jiri Olsa1b43b702017-01-09 10:51:56 +010058struct switch_output {
Jiri Olsadc0c6122017-01-09 10:51:58 +010059 bool enabled;
Jiri Olsa1b43b702017-01-09 10:51:56 +010060 bool signal;
Jiri Olsadc0c6122017-01-09 10:51:58 +010061 unsigned long size;
Jiri Olsabfacbe32017-01-09 10:52:00 +010062 unsigned long time;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +010063 const char *str;
64 bool set;
Andi Kleen03724b22019-03-14 15:49:55 -070065 char **filenames;
66 int num_files;
67 int cur_file;
Jiri Olsa1b43b702017-01-09 10:51:56 +010068};
69
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -030070struct record {
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -020071 struct perf_tool tool;
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -030072 struct record_opts opts;
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -020073 u64 bytes_written;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +010074 struct perf_data data;
Adrian Hunteref149c22015-04-09 18:53:45 +030075 struct auxtrace_record *itr;
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -020076 struct perf_evlist *evlist;
77 struct perf_session *session;
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -020078 int realtime_prio;
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -020079 bool no_buildid;
Wang Nand2db9a92016-01-25 09:56:19 +000080 bool no_buildid_set;
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -020081 bool no_buildid_cache;
Wang Nand2db9a92016-01-25 09:56:19 +000082 bool no_buildid_cache_set;
Namhyung Kim61566812016-01-11 22:37:09 +090083 bool buildid_all;
Wang Nanecfd7a92016-04-13 08:21:07 +000084 bool timestamp_filename;
Jin Yao68588ba2017-12-08 21:13:42 +080085 bool timestamp_boundary;
Jiri Olsa1b43b702017-01-09 10:51:56 +010086 struct switch_output switch_output;
Yang Shi9f065192015-09-29 14:49:43 -070087 unsigned long long samples;
Alexey Budankov9d2ed642019-01-22 20:47:43 +030088 cpu_set_t affinity_mask;
Arnaldo Carvalho de Melo0f82ebc2011-11-08 14:41:57 -020089};
Ingo Molnara21ca2c2009-06-06 09:58:57 +020090
Jiri Olsadc0c6122017-01-09 10:51:58 +010091static volatile int auxtrace_record__snapshot_started;
92static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
93static DEFINE_TRIGGER(switch_output_trigger);
94
Alexey Budankov9d2ed642019-01-22 20:47:43 +030095static const char *affinity_tags[PERF_AFFINITY_MAX] = {
96 "SYS", "NODE", "CPU"
97};
98
Jiri Olsadc0c6122017-01-09 10:51:58 +010099static bool switch_output_signal(struct record *rec)
100{
101 return rec->switch_output.signal &&
102 trigger_is_ready(&switch_output_trigger);
103}
104
105static bool switch_output_size(struct record *rec)
106{
107 return rec->switch_output.size &&
108 trigger_is_ready(&switch_output_trigger) &&
109 (rec->bytes_written >= rec->switch_output.size);
110}
111
Jiri Olsabfacbe32017-01-09 10:52:00 +0100112static bool switch_output_time(struct record *rec)
113{
114 return rec->switch_output.time &&
115 trigger_is_ready(&switch_output_trigger);
116}
117
Jiri Olsaded2b8f2018-09-13 14:54:06 +0200118static int record__write(struct record *rec, struct perf_mmap *map __maybe_unused,
119 void *bf, size_t size)
Peter Zijlstraf5970552009-06-18 23:22:55 +0200120{
Jiri Olsaded2b8f2018-09-13 14:54:06 +0200121 struct perf_data_file *file = &rec->session->data->file;
122
123 if (perf_data_file__write(file, bf, size) < 0) {
Jiri Olsa50a9b862013-11-22 13:11:24 +0100124 pr_err("failed to write perf data, error: %m\n");
125 return -1;
Peter Zijlstraf5970552009-06-18 23:22:55 +0200126 }
David Ahern8d3eca22012-08-26 12:24:47 -0600127
Arnaldo Carvalho de Melocf8b2e62013-12-19 14:26:26 -0300128 rec->bytes_written += size;
Jiri Olsadc0c6122017-01-09 10:51:58 +0100129
130 if (switch_output_size(rec))
131 trigger_hit(&switch_output_trigger);
132
David Ahern8d3eca22012-08-26 12:24:47 -0600133 return 0;
Peter Zijlstraf5970552009-06-18 23:22:55 +0200134}
135
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300136#ifdef HAVE_AIO_SUPPORT
137static int record__aio_write(struct aiocb *cblock, int trace_fd,
138 void *buf, size_t size, off_t off)
139{
140 int rc;
141
142 cblock->aio_fildes = trace_fd;
143 cblock->aio_buf = buf;
144 cblock->aio_nbytes = size;
145 cblock->aio_offset = off;
146 cblock->aio_sigevent.sigev_notify = SIGEV_NONE;
147
148 do {
149 rc = aio_write(cblock);
150 if (rc == 0) {
151 break;
152 } else if (errno != EAGAIN) {
153 cblock->aio_fildes = -1;
154 pr_err("failed to queue perf data, error: %m\n");
155 break;
156 }
157 } while (1);
158
159 return rc;
160}
161
162static int record__aio_complete(struct perf_mmap *md, struct aiocb *cblock)
163{
164 void *rem_buf;
165 off_t rem_off;
166 size_t rem_size;
167 int rc, aio_errno;
168 ssize_t aio_ret, written;
169
170 aio_errno = aio_error(cblock);
171 if (aio_errno == EINPROGRESS)
172 return 0;
173
174 written = aio_ret = aio_return(cblock);
175 if (aio_ret < 0) {
176 if (aio_errno != EINTR)
177 pr_err("failed to write perf data, error: %m\n");
178 written = 0;
179 }
180
181 rem_size = cblock->aio_nbytes - written;
182
183 if (rem_size == 0) {
184 cblock->aio_fildes = -1;
185 /*
186 * md->refcount is incremented in perf_mmap__push() for
187 * every enqueued aio write request so decrement it because
188 * the request is now complete.
189 */
190 perf_mmap__put(md);
191 rc = 1;
192 } else {
193 /*
194 * aio write request may require restart with the
195 * reminder if the kernel didn't write whole
196 * chunk at once.
197 */
198 rem_off = cblock->aio_offset + written;
199 rem_buf = (void *)(cblock->aio_buf + written);
200 record__aio_write(cblock, cblock->aio_fildes,
201 rem_buf, rem_size, rem_off);
202 rc = 0;
203 }
204
205 return rc;
206}
207
Alexey Budankov93f20c02018-11-06 12:07:19 +0300208static int record__aio_sync(struct perf_mmap *md, bool sync_all)
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300209{
Alexey Budankov93f20c02018-11-06 12:07:19 +0300210 struct aiocb **aiocb = md->aio.aiocb;
211 struct aiocb *cblocks = md->aio.cblocks;
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300212 struct timespec timeout = { 0, 1000 * 1000 * 1 }; /* 1ms */
Alexey Budankov93f20c02018-11-06 12:07:19 +0300213 int i, do_suspend;
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300214
215 do {
Alexey Budankov93f20c02018-11-06 12:07:19 +0300216 do_suspend = 0;
217 for (i = 0; i < md->aio.nr_cblocks; ++i) {
218 if (cblocks[i].aio_fildes == -1 || record__aio_complete(md, &cblocks[i])) {
219 if (sync_all)
220 aiocb[i] = NULL;
221 else
222 return i;
223 } else {
224 /*
225 * Started aio write is not complete yet
226 * so it has to be waited before the
227 * next allocation.
228 */
229 aiocb[i] = &cblocks[i];
230 do_suspend = 1;
231 }
232 }
233 if (!do_suspend)
234 return -1;
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300235
Alexey Budankov93f20c02018-11-06 12:07:19 +0300236 while (aio_suspend((const struct aiocb **)aiocb, md->aio.nr_cblocks, &timeout)) {
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300237 if (!(errno == EAGAIN || errno == EINTR))
238 pr_err("failed to sync perf data, error: %m\n");
239 }
240 } while (1);
241}
242
243static int record__aio_pushfn(void *to, struct aiocb *cblock, void *bf, size_t size, off_t off)
244{
245 struct record *rec = to;
246 int ret, trace_fd = rec->session->data->file.fd;
247
248 rec->samples++;
249
250 ret = record__aio_write(cblock, trace_fd, bf, size, off);
251 if (!ret) {
252 rec->bytes_written += size;
253 if (switch_output_size(rec))
254 trigger_hit(&switch_output_trigger);
255 }
256
257 return ret;
258}
259
260static off_t record__aio_get_pos(int trace_fd)
261{
262 return lseek(trace_fd, 0, SEEK_CUR);
263}
264
265static void record__aio_set_pos(int trace_fd, off_t pos)
266{
267 lseek(trace_fd, pos, SEEK_SET);
268}
269
270static void record__aio_mmap_read_sync(struct record *rec)
271{
272 int i;
273 struct perf_evlist *evlist = rec->evlist;
274 struct perf_mmap *maps = evlist->mmap;
275
276 if (!rec->opts.nr_cblocks)
277 return;
278
279 for (i = 0; i < evlist->nr_mmaps; i++) {
280 struct perf_mmap *map = &maps[i];
281
282 if (map->base)
Alexey Budankov93f20c02018-11-06 12:07:19 +0300283 record__aio_sync(map, true);
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300284 }
285}
286
287static int nr_cblocks_default = 1;
Alexey Budankov93f20c02018-11-06 12:07:19 +0300288static int nr_cblocks_max = 4;
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300289
290static int record__aio_parse(const struct option *opt,
Alexey Budankov93f20c02018-11-06 12:07:19 +0300291 const char *str,
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300292 int unset)
293{
294 struct record_opts *opts = (struct record_opts *)opt->value;
295
Alexey Budankov93f20c02018-11-06 12:07:19 +0300296 if (unset) {
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300297 opts->nr_cblocks = 0;
Alexey Budankov93f20c02018-11-06 12:07:19 +0300298 } else {
299 if (str)
300 opts->nr_cblocks = strtol(str, NULL, 0);
301 if (!opts->nr_cblocks)
302 opts->nr_cblocks = nr_cblocks_default;
303 }
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300304
305 return 0;
306}
307#else /* HAVE_AIO_SUPPORT */
Alexey Budankov93f20c02018-11-06 12:07:19 +0300308static int nr_cblocks_max = 0;
309
310static int record__aio_sync(struct perf_mmap *md __maybe_unused, bool sync_all __maybe_unused)
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300311{
Alexey Budankov93f20c02018-11-06 12:07:19 +0300312 return -1;
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300313}
314
315static int record__aio_pushfn(void *to __maybe_unused, struct aiocb *cblock __maybe_unused,
316 void *bf __maybe_unused, size_t size __maybe_unused, off_t off __maybe_unused)
317{
318 return -1;
319}
320
321static off_t record__aio_get_pos(int trace_fd __maybe_unused)
322{
323 return -1;
324}
325
326static void record__aio_set_pos(int trace_fd __maybe_unused, off_t pos __maybe_unused)
327{
328}
329
330static void record__aio_mmap_read_sync(struct record *rec __maybe_unused)
331{
332}
333#endif
334
335static int record__aio_enabled(struct record *rec)
336{
337 return rec->opts.nr_cblocks > 0;
338}
339
Alexey Budankov470530b2019-03-18 20:40:26 +0300340#define MMAP_FLUSH_DEFAULT 1
341static int record__mmap_flush_parse(const struct option *opt,
342 const char *str,
343 int unset)
344{
345 int flush_max;
346 struct record_opts *opts = (struct record_opts *)opt->value;
347 static struct parse_tag tags[] = {
348 { .tag = 'B', .mult = 1 },
349 { .tag = 'K', .mult = 1 << 10 },
350 { .tag = 'M', .mult = 1 << 20 },
351 { .tag = 'G', .mult = 1 << 30 },
352 { .tag = 0 },
353 };
354
355 if (unset)
356 return 0;
357
358 if (str) {
359 opts->mmap_flush = parse_tag_value(str, tags);
360 if (opts->mmap_flush == (int)-1)
361 opts->mmap_flush = strtol(str, NULL, 0);
362 }
363
364 if (!opts->mmap_flush)
365 opts->mmap_flush = MMAP_FLUSH_DEFAULT;
366
367 flush_max = perf_evlist__mmap_size(opts->mmap_pages);
368 flush_max /= 4;
369 if (opts->mmap_flush > flush_max)
370 opts->mmap_flush = flush_max;
371
372 return 0;
373}
374
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -0200375static int process_synthesized_event(struct perf_tool *tool,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -0200376 union perf_event *event,
Irina Tirdea1d037ca2012-09-11 01:15:03 +0300377 struct perf_sample *sample __maybe_unused,
378 struct machine *machine __maybe_unused)
Arnaldo Carvalho de Melo234fbbf2009-10-26 19:23:18 -0200379{
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -0300380 struct record *rec = container_of(tool, struct record, tool);
Jiri Olsaded2b8f2018-09-13 14:54:06 +0200381 return record__write(rec, NULL, event, event->header.size);
Arnaldo Carvalho de Melo234fbbf2009-10-26 19:23:18 -0200382}
383
Jiri Olsaded2b8f2018-09-13 14:54:06 +0200384static int record__pushfn(struct perf_mmap *map, void *to, void *bf, size_t size)
Arnaldo Carvalho de Melod37f1582017-10-05 16:39:55 -0300385{
386 struct record *rec = to;
387
388 rec->samples++;
Jiri Olsaded2b8f2018-09-13 14:54:06 +0200389 return record__write(rec, map, bf, size);
Arnaldo Carvalho de Melod37f1582017-10-05 16:39:55 -0300390}
391
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300392static volatile int done;
393static volatile int signr = -1;
394static volatile int child_finished;
Wang Nanc0bdc1c2016-04-13 08:21:06 +0000395
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300396static void sig_handler(int sig)
397{
398 if (sig == SIGCHLD)
399 child_finished = 1;
400 else
401 signr = sig;
402
403 done = 1;
404}
405
Wang Nana0748652016-11-26 07:03:28 +0000406static void sigsegv_handler(int sig)
407{
408 perf_hooks__recover();
409 sighandler_dump_stack(sig);
410}
411
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300412static void record__sig_exit(void)
413{
414 if (signr == -1)
415 return;
416
417 signal(signr, SIG_DFL);
418 raise(signr);
419}
420
Adrian Huntere31f0d02015-04-30 17:37:27 +0300421#ifdef HAVE_AUXTRACE_SUPPORT
422
Adrian Hunteref149c22015-04-09 18:53:45 +0300423static int record__process_auxtrace(struct perf_tool *tool,
Jiri Olsaded2b8f2018-09-13 14:54:06 +0200424 struct perf_mmap *map,
Adrian Hunteref149c22015-04-09 18:53:45 +0300425 union perf_event *event, void *data1,
426 size_t len1, void *data2, size_t len2)
427{
428 struct record *rec = container_of(tool, struct record, tool);
Jiri Olsa8ceb41d2017-01-23 22:07:59 +0100429 struct perf_data *data = &rec->data;
Adrian Hunteref149c22015-04-09 18:53:45 +0300430 size_t padding;
431 u8 pad[8] = {0};
432
Jiri Olsacd3dd8d2019-03-08 14:47:36 +0100433 if (!perf_data__is_pipe(data) && !perf_data__is_dir(data)) {
Adrian Hunter99fa2982015-04-30 17:37:25 +0300434 off_t file_offset;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +0100435 int fd = perf_data__fd(data);
Adrian Hunter99fa2982015-04-30 17:37:25 +0300436 int err;
437
438 file_offset = lseek(fd, 0, SEEK_CUR);
439 if (file_offset == -1)
440 return -1;
441 err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
442 event, file_offset);
443 if (err)
444 return err;
445 }
446
Adrian Hunteref149c22015-04-09 18:53:45 +0300447 /* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
448 padding = (len1 + len2) & 7;
449 if (padding)
450 padding = 8 - padding;
451
Jiri Olsaded2b8f2018-09-13 14:54:06 +0200452 record__write(rec, map, event, event->header.size);
453 record__write(rec, map, data1, len1);
Adrian Hunteref149c22015-04-09 18:53:45 +0300454 if (len2)
Jiri Olsaded2b8f2018-09-13 14:54:06 +0200455 record__write(rec, map, data2, len2);
456 record__write(rec, map, &pad, padding);
Adrian Hunteref149c22015-04-09 18:53:45 +0300457
458 return 0;
459}
460
461static int record__auxtrace_mmap_read(struct record *rec,
Jiri Olsae035f4c2018-09-13 14:54:05 +0200462 struct perf_mmap *map)
Adrian Hunteref149c22015-04-09 18:53:45 +0300463{
464 int ret;
465
Jiri Olsae035f4c2018-09-13 14:54:05 +0200466 ret = auxtrace_mmap__read(map, rec->itr, &rec->tool,
Adrian Hunteref149c22015-04-09 18:53:45 +0300467 record__process_auxtrace);
468 if (ret < 0)
469 return ret;
470
471 if (ret)
472 rec->samples++;
473
474 return 0;
475}
476
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300477static int record__auxtrace_mmap_read_snapshot(struct record *rec,
Jiri Olsae035f4c2018-09-13 14:54:05 +0200478 struct perf_mmap *map)
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300479{
480 int ret;
481
Jiri Olsae035f4c2018-09-13 14:54:05 +0200482 ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool,
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300483 record__process_auxtrace,
484 rec->opts.auxtrace_snapshot_size);
485 if (ret < 0)
486 return ret;
487
488 if (ret)
489 rec->samples++;
490
491 return 0;
492}
493
494static int record__auxtrace_read_snapshot_all(struct record *rec)
495{
496 int i;
497 int rc = 0;
498
499 for (i = 0; i < rec->evlist->nr_mmaps; i++) {
Jiri Olsae035f4c2018-09-13 14:54:05 +0200500 struct perf_mmap *map = &rec->evlist->mmap[i];
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300501
Jiri Olsae035f4c2018-09-13 14:54:05 +0200502 if (!map->auxtrace_mmap.base)
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300503 continue;
504
Jiri Olsae035f4c2018-09-13 14:54:05 +0200505 if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300506 rc = -1;
507 goto out;
508 }
509 }
510out:
511 return rc;
512}
513
514static void record__read_auxtrace_snapshot(struct record *rec)
515{
516 pr_debug("Recording AUX area tracing snapshot\n");
517 if (record__auxtrace_read_snapshot_all(rec) < 0) {
Wang Nan5f9cf592016-04-20 18:59:49 +0000518 trigger_error(&auxtrace_snapshot_trigger);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300519 } else {
Wang Nan5f9cf592016-04-20 18:59:49 +0000520 if (auxtrace_record__snapshot_finish(rec->itr))
521 trigger_error(&auxtrace_snapshot_trigger);
522 else
523 trigger_ready(&auxtrace_snapshot_trigger);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300524 }
525}
526
Adrian Hunter4b5ea3b2018-03-06 11:13:12 +0200527static int record__auxtrace_init(struct record *rec)
528{
529 int err;
530
531 if (!rec->itr) {
532 rec->itr = auxtrace_record__init(rec->evlist, &err);
533 if (err)
534 return err;
535 }
536
537 err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
538 rec->opts.auxtrace_snapshot_opts);
539 if (err)
540 return err;
541
542 return auxtrace_parse_filters(rec->evlist);
543}
544
Adrian Huntere31f0d02015-04-30 17:37:27 +0300545#else
546
547static inline
548int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
Jiri Olsae035f4c2018-09-13 14:54:05 +0200549 struct perf_mmap *map __maybe_unused)
Adrian Huntere31f0d02015-04-30 17:37:27 +0300550{
551 return 0;
552}
553
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300554static inline
555void record__read_auxtrace_snapshot(struct record *rec __maybe_unused)
556{
557}
558
559static inline
560int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
561{
562 return 0;
563}
564
Adrian Hunter4b5ea3b2018-03-06 11:13:12 +0200565static int record__auxtrace_init(struct record *rec __maybe_unused)
566{
567 return 0;
568}
569
Adrian Huntere31f0d02015-04-30 17:37:27 +0300570#endif
571
Wang Nancda57a82016-06-27 10:24:03 +0000572static int record__mmap_evlist(struct record *rec,
573 struct perf_evlist *evlist)
574{
575 struct record_opts *opts = &rec->opts;
576 char msg[512];
577
Alexey Budankovf13de662019-01-22 20:50:57 +0300578 if (opts->affinity != PERF_AFFINITY_SYS)
579 cpu__setup_cpunode_map();
580
Wang Nan7a276ff2017-12-03 02:00:38 +0000581 if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
Wang Nancda57a82016-06-27 10:24:03 +0000582 opts->auxtrace_mmap_pages,
Alexey Budankov9d2ed642019-01-22 20:47:43 +0300583 opts->auxtrace_snapshot_mode,
Alexey Budankov470530b2019-03-18 20:40:26 +0300584 opts->nr_cblocks, opts->affinity,
585 opts->mmap_flush) < 0) {
Wang Nancda57a82016-06-27 10:24:03 +0000586 if (errno == EPERM) {
587 pr_err("Permission error mapping pages.\n"
588 "Consider increasing "
589 "/proc/sys/kernel/perf_event_mlock_kb,\n"
590 "or try again with a smaller value of -m/--mmap_pages.\n"
591 "(current value: %u,%u)\n",
592 opts->mmap_pages, opts->auxtrace_mmap_pages);
593 return -errno;
594 } else {
595 pr_err("failed to mmap with %d (%s)\n", errno,
Arnaldo Carvalho de Meloc8b5f2c2016-07-06 11:56:20 -0300596 str_error_r(errno, msg, sizeof(msg)));
Wang Nancda57a82016-06-27 10:24:03 +0000597 if (errno)
598 return -errno;
599 else
600 return -EINVAL;
601 }
602 }
603 return 0;
604}
605
606static int record__mmap(struct record *rec)
607{
608 return record__mmap_evlist(rec, rec->evlist);
609}
610
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -0300611static int record__open(struct record *rec)
Arnaldo Carvalho de Melodd7927f2011-01-12 14:28:51 -0200612{
Arnaldo Carvalho de Melod6195a62017-02-13 16:45:24 -0300613 char msg[BUFSIZ];
Jiri Olsa6a4bb042012-08-08 12:22:36 +0200614 struct perf_evsel *pos;
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -0200615 struct perf_evlist *evlist = rec->evlist;
616 struct perf_session *session = rec->session;
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -0300617 struct record_opts *opts = &rec->opts;
David Ahern8d3eca22012-08-26 12:24:47 -0600618 int rc = 0;
Arnaldo Carvalho de Melodd7927f2011-01-12 14:28:51 -0200619
Arnaldo Carvalho de Melod3dbf432017-11-03 15:34:34 -0300620 /*
621 * For initial_delay we need to add a dummy event so that we can track
622 * PERF_RECORD_MMAP while we wait for the initial delay to enable the
623 * real events, the ones asked by the user.
624 */
625 if (opts->initial_delay) {
626 if (perf_evlist__add_dummy(evlist))
627 return -ENOMEM;
628
629 pos = perf_evlist__first(evlist);
630 pos->tracking = 0;
631 pos = perf_evlist__last(evlist);
632 pos->tracking = 1;
633 pos->attr.enable_on_exec = 1;
634 }
635
Arnaldo Carvalho de Meloe68ae9c2016-04-11 18:15:29 -0300636 perf_evlist__config(evlist, opts, &callchain_param);
Jiri Olsacac21422012-11-12 18:34:00 +0100637
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -0300638 evlist__for_each_entry(evlist, pos) {
Ingo Molnar3da297a2009-06-07 17:39:02 +0200639try_again:
Kan Liangd988d5e2015-08-21 02:23:14 -0400640 if (perf_evsel__open(pos, pos->cpus, pos->threads) < 0) {
Arnaldo Carvalho de Melo56e52e82012-12-13 15:10:58 -0300641 if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
Namhyung Kimbb963e12017-02-17 17:17:38 +0900642 if (verbose > 0)
Arnaldo Carvalho de Meloc0a54342012-12-13 14:16:30 -0300643 ui__warning("%s\n", msg);
Zhang, Yanmind6d901c2010-03-18 11:36:05 -0300644 goto try_again;
645 }
Andi Kleencf99ad12018-10-01 12:59:27 -0700646 if ((errno == EINVAL || errno == EBADF) &&
647 pos->leader != pos &&
648 pos->weak_group) {
649 pos = perf_evlist__reset_weak_group(evlist, pos);
650 goto try_again;
651 }
Arnaldo Carvalho de Melo56e52e82012-12-13 15:10:58 -0300652 rc = -errno;
653 perf_evsel__open_strerror(pos, &opts->target,
654 errno, msg, sizeof(msg));
655 ui__error("%s\n", msg);
David Ahern8d3eca22012-08-26 12:24:47 -0600656 goto out;
Zhang, Yanmind6d901c2010-03-18 11:36:05 -0300657 }
Andi Kleenbfd8f722017-11-17 13:42:58 -0800658
659 pos->supported = true;
Li Zefanc171b552009-10-15 11:22:07 +0800660 }
Arnaldo Carvalho de Meloa43d3f02010-12-25 12:12:25 -0200661
Arnaldo Carvalho de Melo23d4aad2015-03-24 19:23:47 -0300662 if (perf_evlist__apply_filters(evlist, &pos)) {
Arnaldo Carvalho de Melo62d94b02017-06-27 11:22:31 -0300663 pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
Arnaldo Carvalho de Melo23d4aad2015-03-24 19:23:47 -0300664 pos->filter, perf_evsel__name(pos), errno,
Arnaldo Carvalho de Meloc8b5f2c2016-07-06 11:56:20 -0300665 str_error_r(errno, msg, sizeof(msg)));
David Ahern8d3eca22012-08-26 12:24:47 -0600666 rc = -1;
667 goto out;
Frederic Weisbecker0a102472011-02-26 04:51:54 +0100668 }
669
Wang Nancda57a82016-06-27 10:24:03 +0000670 rc = record__mmap(rec);
671 if (rc)
David Ahern8d3eca22012-08-26 12:24:47 -0600672 goto out;
Arnaldo Carvalho de Melo0a27d7f2011-01-14 15:50:51 -0200673
Jiri Olsa563aecb2013-06-05 13:35:06 +0200674 session->evlist = evlist;
Arnaldo Carvalho de Melo7b56cce2012-08-01 19:31:00 -0300675 perf_session__set_id_hdr_size(session);
David Ahern8d3eca22012-08-26 12:24:47 -0600676out:
677 return rc;
Peter Zijlstra16c8a102009-05-05 17:50:27 +0200678}
679
Namhyung Kime3d59112015-01-29 17:06:44 +0900680static int process_sample_event(struct perf_tool *tool,
681 union perf_event *event,
682 struct perf_sample *sample,
683 struct perf_evsel *evsel,
684 struct machine *machine)
685{
686 struct record *rec = container_of(tool, struct record, tool);
687
Jin Yao68588ba2017-12-08 21:13:42 +0800688 if (rec->evlist->first_sample_time == 0)
689 rec->evlist->first_sample_time = sample->time;
Namhyung Kime3d59112015-01-29 17:06:44 +0900690
Jin Yao68588ba2017-12-08 21:13:42 +0800691 rec->evlist->last_sample_time = sample->time;
692
693 if (rec->buildid_all)
694 return 0;
695
696 rec->samples++;
Namhyung Kime3d59112015-01-29 17:06:44 +0900697 return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
698}
699
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -0300700static int process_buildids(struct record *rec)
Arnaldo Carvalho de Melo6122e4e2010-02-03 16:52:05 -0200701{
Jiri Olsaf5fc14122013-10-15 16:27:32 +0200702 struct perf_session *session = rec->session;
Arnaldo Carvalho de Melo6122e4e2010-02-03 16:52:05 -0200703
Jiri Olsa45112e82019-02-21 10:41:29 +0100704 if (perf_data__size(&rec->data) == 0)
Arnaldo Carvalho de Melo9f591fd2010-03-11 15:53:11 -0300705 return 0;
706
Namhyung Kim00dc8652014-11-04 10:14:32 +0900707 /*
708 * During this process, it'll load kernel map and replace the
709 * dso->long_name to a real pathname it found. In this case
710 * we prefer the vmlinux path like
711 * /lib/modules/3.16.4/build/vmlinux
712 *
713 * rather than build-id path (in debug directory).
714 * $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
715 */
716 symbol_conf.ignore_vmlinux_buildid = true;
717
Namhyung Kim61566812016-01-11 22:37:09 +0900718 /*
719 * If --buildid-all is given, it marks all DSO regardless of hits,
Jin Yao68588ba2017-12-08 21:13:42 +0800720 * so no need to process samples. But if timestamp_boundary is enabled,
721 * it still needs to walk on all samples to get the timestamps of
722 * first/last samples.
Namhyung Kim61566812016-01-11 22:37:09 +0900723 */
Jin Yao68588ba2017-12-08 21:13:42 +0800724 if (rec->buildid_all && !rec->timestamp_boundary)
Namhyung Kim61566812016-01-11 22:37:09 +0900725 rec->tool.sample = NULL;
726
Arnaldo Carvalho de Melob7b61cb2015-03-03 11:58:45 -0300727 return perf_session__process_events(session);
Arnaldo Carvalho de Melo6122e4e2010-02-03 16:52:05 -0200728}
729
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -0200730static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800731{
732 int err;
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -0200733 struct perf_tool *tool = data;
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800734 /*
735 *As for guest kernel when processing subcommand record&report,
736 *we arrange module mmap prior to guest kernel mmap and trigger
737 *a preload dso because default guest module symbols are loaded
738 *from guest kallsyms instead of /lib/modules/XXX/XXX. This
739 *method is used to avoid symbol missing when the first addr is
740 *in module instead of in guest kernel.
741 */
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -0200742 err = perf_event__synthesize_modules(tool, process_synthesized_event,
Arnaldo Carvalho de Melo743eb862011-11-28 07:56:39 -0200743 machine);
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800744 if (err < 0)
745 pr_err("Couldn't record guest kernel [%d]'s reference"
Arnaldo Carvalho de Melo23346f22010-04-27 21:17:50 -0300746 " relocation symbol.\n", machine->pid);
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800747
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800748 /*
749 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
750 * have no _text sometimes.
751 */
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -0200752 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
Adrian Hunter0ae617b2014-01-29 16:14:40 +0200753 machine);
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800754 if (err < 0)
755 pr_err("Couldn't record guest kernel [%d]'s reference"
Arnaldo Carvalho de Melo23346f22010-04-27 21:17:50 -0300756 " relocation symbol.\n", machine->pid);
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800757}
758
Frederic Weisbecker98402802010-05-02 22:05:29 +0200759static struct perf_event_header finished_round_event = {
760 .size = sizeof(struct perf_event_header),
761 .type = PERF_RECORD_FINISHED_ROUND,
762};
763
Alexey Budankovf13de662019-01-22 20:50:57 +0300764static void record__adjust_affinity(struct record *rec, struct perf_mmap *map)
765{
766 if (rec->opts.affinity != PERF_AFFINITY_SYS &&
767 !CPU_EQUAL(&rec->affinity_mask, &map->affinity_mask)) {
768 CPU_ZERO(&rec->affinity_mask);
769 CPU_OR(&rec->affinity_mask, &rec->affinity_mask, &map->affinity_mask);
770 sched_setaffinity(0, sizeof(rec->affinity_mask), &rec->affinity_mask);
771 }
772}
773
/*
 * Drain every mmap'd ring buffer of @evlist into the output file.
 *
 * @overwrite: read the overwritable (backward) buffers instead of the
 *             regular ones; only done when they are in DATA_PENDING state.
 * @synch:     force a full flush by temporarily dropping each map's
 *             flush threshold to 1 for the duration of the push.
 *
 * Returns 0 on success, -1 on any push/auxtrace failure.  On the AIO
 * path the file offset is tracked manually via record__aio_{get,set}_pos()
 * because writes complete asynchronously.  If at least one event was
 * written this round, a FINISHED_ROUND marker is appended.
 */
static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
				    bool overwrite, bool synch)
{
	u64 bytes_written = rec->bytes_written;
	int i;
	int rc = 0;
	struct perf_mmap *maps;
	int trace_fd = rec->data.file.fd;
	off_t off;

	if (!evlist)
		return 0;

	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
	if (!maps)
		return 0;

	/* Overwritable buffers are only readable once flagged DATA_PENDING. */
	if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
		return 0;

	if (record__aio_enabled(rec))
		off = record__aio_get_pos(trace_fd);

	for (i = 0; i < evlist->nr_mmaps; i++) {
		u64 flush = 0;
		struct perf_mmap *map = &maps[i];

		if (map->base) {
			record__adjust_affinity(rec, map);
			if (synch) {
				/* Save threshold; 1 forces everything out. */
				flush = map->flush;
				map->flush = 1;
			}
			if (!record__aio_enabled(rec)) {
				if (perf_mmap__push(map, rec, record__pushfn) != 0) {
					if (synch)
						map->flush = flush;
					rc = -1;
					goto out;
				}
			} else {
				int idx;
				/*
				 * Call record__aio_sync() to wait till map->data buffer
				 * becomes available after previous aio write request.
				 */
				idx = record__aio_sync(map, false);
				if (perf_mmap__aio_push(map, rec, idx, record__aio_pushfn, &off) != 0) {
					/* Persist the advanced offset before bailing. */
					record__aio_set_pos(trace_fd, off);
					if (synch)
						map->flush = flush;
					rc = -1;
					goto out;
				}
			}
			if (synch)
				map->flush = flush;
		}

		if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
		    record__auxtrace_mmap_read(rec, map) != 0) {
			rc = -1;
			goto out;
		}
	}

	if (record__aio_enabled(rec))
		record__aio_set_pos(trace_fd, off);

	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, NULL, &finished_round_event, sizeof(finished_round_event));

	/* Consumed the overwritable buffers: mark them empty again. */
	if (overwrite)
		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
out:
	return rc;
}
855
Alexey Budankov470530b2019-03-18 20:40:26 +0300856static int record__mmap_read_all(struct record *rec, bool synch)
Wang Nancb216862016-06-27 10:24:04 +0000857{
858 int err;
859
Alexey Budankov470530b2019-03-18 20:40:26 +0300860 err = record__mmap_read_evlist(rec, rec->evlist, false, synch);
Wang Nancb216862016-06-27 10:24:04 +0000861 if (err)
862 return err;
863
Alexey Budankov470530b2019-03-18 20:40:26 +0300864 return record__mmap_read_evlist(rec, rec->evlist, true, synch);
Wang Nancb216862016-06-27 10:24:04 +0000865}
866
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -0300867static void record__init_features(struct record *rec)
David Ahern57706ab2013-11-06 11:41:34 -0700868{
David Ahern57706ab2013-11-06 11:41:34 -0700869 struct perf_session *session = rec->session;
870 int feat;
871
872 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
873 perf_header__set_feat(&session->header, feat);
874
875 if (rec->no_buildid)
876 perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
877
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -0300878 if (!have_tracepoints(&rec->evlist->entries))
David Ahern57706ab2013-11-06 11:41:34 -0700879 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
880
881 if (!rec->opts.branch_stack)
882 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
Adrian Hunteref149c22015-04-09 18:53:45 +0300883
884 if (!rec->opts.full_auxtrace)
885 perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
Jiri Olsaffa517a2015-10-25 15:51:43 +0100886
Alexey Budankovcf790512018-10-09 17:36:24 +0300887 if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
888 perf_header__clear_feat(&session->header, HEADER_CLOCKID);
889
Jiri Olsa258031c2019-03-08 14:47:39 +0100890 perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
891
Jiri Olsaffa517a2015-10-25 15:51:43 +0100892 perf_header__clear_feat(&session->header, HEADER_STAT);
David Ahern57706ab2013-11-06 11:41:34 -0700893}
894
Wang Nane1ab48b2016-02-26 09:32:10 +0000895static void
896record__finish_output(struct record *rec)
897{
Jiri Olsa8ceb41d2017-01-23 22:07:59 +0100898 struct perf_data *data = &rec->data;
899 int fd = perf_data__fd(data);
Wang Nane1ab48b2016-02-26 09:32:10 +0000900
Jiri Olsa8ceb41d2017-01-23 22:07:59 +0100901 if (data->is_pipe)
Wang Nane1ab48b2016-02-26 09:32:10 +0000902 return;
903
904 rec->session->header.data_size += rec->bytes_written;
Jiri Olsa45112e82019-02-21 10:41:29 +0100905 data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR);
Wang Nane1ab48b2016-02-26 09:32:10 +0000906
907 if (!rec->no_buildid) {
908 process_buildids(rec);
909
910 if (rec->buildid_all)
911 dsos__hit_all(rec->session);
912 }
913 perf_session__write_header(rec->session, rec->evlist, fd, true);
914
915 return;
916}
917
Wang Nan4ea648a2016-07-14 08:34:47 +0000918static int record__synthesize_workload(struct record *rec, bool tail)
Wang Nanbe7b0c92016-04-20 18:59:54 +0000919{
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -0300920 int err;
921 struct thread_map *thread_map;
Wang Nanbe7b0c92016-04-20 18:59:54 +0000922
Wang Nan4ea648a2016-07-14 08:34:47 +0000923 if (rec->opts.tail_synthesize != tail)
924 return 0;
925
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -0300926 thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
927 if (thread_map == NULL)
928 return -1;
929
930 err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
Wang Nanbe7b0c92016-04-20 18:59:54 +0000931 process_synthesized_event,
932 &rec->session->machines.host,
Mark Drayton3fcb10e2018-12-04 12:34:20 -0800933 rec->opts.sample_address);
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -0300934 thread_map__put(thread_map);
935 return err;
Wang Nanbe7b0c92016-04-20 18:59:54 +0000936}
937
Wang Nan4ea648a2016-07-14 08:34:47 +0000938static int record__synthesize(struct record *rec, bool tail);
Wang Nan3c1cb7e2016-04-20 18:59:50 +0000939
/*
 * Rotate the output: finish the current perf.data, rename it with a
 * timestamp suffix and open a fresh file (--switch-output).
 *
 * @at_exit: true when called from the final teardown path, in which
 *           case no new file counters are reset and no follow-up
 *           tracking events are synthesized.
 *
 * Returns the new output fd (>= 0) or a negative error.  When a bounded
 * number of rotated files is configured (--switch-output=...:N), the
 * oldest file in the ring is removed to make room.
 */
static int
record__switch_output(struct record *rec, bool at_exit)
{
	struct perf_data *data = &rec->data;
	int fd, err;
	char *new_filename;

	/* Placeholder has the same size as a real "2015122520103046" stamp. */
	char timestamp[] = "InvalidTimestamp";

	/* Let in-flight AIO writes land before sealing the file. */
	record__aio_mmap_read_sync(rec);

	record__synthesize(rec, true);
	if (target__none(&rec->opts.target))
		record__synthesize_workload(rec, true);

	rec->samples = 0;
	record__finish_output(rec);
	err = fetch_current_timestamp(timestamp, sizeof(timestamp));
	if (err) {
		pr_err("Failed to get current timestamp\n");
		return -EINVAL;
	}

	fd = perf_data__switch(data, timestamp,
				    rec->session->header.data_offset,
				    at_exit, &new_filename);
	if (fd >= 0 && !at_exit) {
		/* Fresh file: restart the byte accounting. */
		rec->bytes_written = 0;
		rec->session->header.data_size = 0;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
			data->path, timestamp);

	if (rec->switch_output.num_files) {
		/* Bounded ring of rotated files: evict the slot we reuse. */
		int n = rec->switch_output.cur_file + 1;

		if (n >= rec->switch_output.num_files)
			n = 0;
		rec->switch_output.cur_file = n;
		if (rec->switch_output.filenames[n]) {
			remove(rec->switch_output.filenames[n]);
			free(rec->switch_output.filenames[n]);
		}
		rec->switch_output.filenames[n] = new_filename;
	} else {
		free(new_filename);
	}

	/* Output tracking events */
	if (!at_exit) {
		record__synthesize(rec, false);

		/*
		 * In 'perf record --switch-output' without -a,
		 * record__synthesize() in record__switch_output() won't
		 * generate tracking events because there's no thread_map
		 * in evlist. Which causes newly created perf.data doesn't
		 * contain map and comm information.
		 * Create a fake thread_map and directly call
		 * perf_event__synthesize_thread_map() for those events.
		 */
		if (target__none(&rec->opts.target))
			record__synthesize_workload(rec, false);
	}
	return fd;
}
1009
/* errno of a failed workload exec, delivered via SIGUSR1's sigval payload. */
static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	/* Async-signal context: only set flags, no other work here. */
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}
1025
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001026static void snapshot_sig_handler(int sig);
Jiri Olsabfacbe32017-01-09 10:52:00 +01001027static void alarm_sig_handler(int sig);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001028
/*
 * Weak default for synthesizing the TIME_CONV event: does nothing and
 * reports success.  Architectures that can read clock conversion
 * parameters from the mmap page override this with a real version.
 */
int __weak
perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
			    struct perf_tool *tool __maybe_unused,
			    perf_event__handler_t process __maybe_unused,
			    struct machine *machine __maybe_unused)
{
	return 0;
}
1037
Wang Nanee667f92016-06-27 10:24:05 +00001038static const struct perf_event_mmap_page *
1039perf_evlist__pick_pc(struct perf_evlist *evlist)
1040{
Wang Nanb2cb6152016-07-14 08:34:39 +00001041 if (evlist) {
1042 if (evlist->mmap && evlist->mmap[0].base)
1043 return evlist->mmap[0].base;
Wang Nan0b72d692017-12-04 16:51:07 +00001044 if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].base)
1045 return evlist->overwrite_mmap[0].base;
Wang Nanb2cb6152016-07-14 08:34:39 +00001046 }
Wang Nanee667f92016-06-27 10:24:05 +00001047 return NULL;
1048}
1049
Wang Nanc45628b2016-05-24 02:28:59 +00001050static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
1051{
Wang Nanee667f92016-06-27 10:24:05 +00001052 const struct perf_event_mmap_page *pc;
1053
1054 pc = perf_evlist__pick_pc(rec->evlist);
1055 if (pc)
1056 return pc;
Wang Nanc45628b2016-05-24 02:28:59 +00001057 return NULL;
1058}
1059
Wang Nan4ea648a2016-07-14 08:34:47 +00001060static int record__synthesize(struct record *rec, bool tail)
Wang Nanc45c86e2016-02-26 09:32:07 +00001061{
1062 struct perf_session *session = rec->session;
1063 struct machine *machine = &session->machines.host;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001064 struct perf_data *data = &rec->data;
Wang Nanc45c86e2016-02-26 09:32:07 +00001065 struct record_opts *opts = &rec->opts;
1066 struct perf_tool *tool = &rec->tool;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001067 int fd = perf_data__fd(data);
Wang Nanc45c86e2016-02-26 09:32:07 +00001068 int err = 0;
1069
Wang Nan4ea648a2016-07-14 08:34:47 +00001070 if (rec->opts.tail_synthesize != tail)
1071 return 0;
1072
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001073 if (data->is_pipe) {
Jiri Olsaa2015512018-03-14 10:22:04 +01001074 /*
1075 * We need to synthesize events first, because some
1076 * features works on top of them (on report side).
1077 */
Jiri Olsa318ec182018-08-30 08:32:15 +02001078 err = perf_event__synthesize_attrs(tool, rec->evlist,
Wang Nanc45c86e2016-02-26 09:32:07 +00001079 process_synthesized_event);
1080 if (err < 0) {
1081 pr_err("Couldn't synthesize attrs.\n");
1082 goto out;
1083 }
1084
Jiri Olsaa2015512018-03-14 10:22:04 +01001085 err = perf_event__synthesize_features(tool, session, rec->evlist,
1086 process_synthesized_event);
1087 if (err < 0) {
1088 pr_err("Couldn't synthesize features.\n");
1089 return err;
1090 }
1091
Wang Nanc45c86e2016-02-26 09:32:07 +00001092 if (have_tracepoints(&rec->evlist->entries)) {
1093 /*
1094 * FIXME err <= 0 here actually means that
1095 * there were no tracepoints so its not really
1096 * an error, just that we don't need to
1097 * synthesize anything. We really have to
1098 * return this more properly and also
1099 * propagate errors that now are calling die()
1100 */
1101 err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
1102 process_synthesized_event);
1103 if (err <= 0) {
1104 pr_err("Couldn't record tracing data.\n");
1105 goto out;
1106 }
1107 rec->bytes_written += err;
1108 }
1109 }
1110
Wang Nanc45628b2016-05-24 02:28:59 +00001111 err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
Adrian Hunter46bc29b2016-03-08 10:38:44 +02001112 process_synthesized_event, machine);
1113 if (err)
1114 goto out;
1115
Wang Nanc45c86e2016-02-26 09:32:07 +00001116 if (rec->opts.full_auxtrace) {
1117 err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
1118 session, process_synthesized_event);
1119 if (err)
1120 goto out;
1121 }
1122
Arnaldo Carvalho de Melo6c443952017-11-14 11:03:19 -03001123 if (!perf_evlist__exclude_kernel(rec->evlist)) {
1124 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
1125 machine);
1126 WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
1127 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
1128 "Check /proc/kallsyms permission or run as root.\n");
Wang Nanc45c86e2016-02-26 09:32:07 +00001129
Arnaldo Carvalho de Melo6c443952017-11-14 11:03:19 -03001130 err = perf_event__synthesize_modules(tool, process_synthesized_event,
1131 machine);
1132 WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
1133 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
1134 "Check /proc/modules permission or run as root.\n");
1135 }
Wang Nanc45c86e2016-02-26 09:32:07 +00001136
1137 if (perf_guest) {
1138 machines__process_guests(&session->machines,
1139 perf_event__synthesize_guest_os, tool);
1140 }
1141
Andi Kleenbfd8f722017-11-17 13:42:58 -08001142 err = perf_event__synthesize_extra_attr(&rec->tool,
1143 rec->evlist,
1144 process_synthesized_event,
1145 data->is_pipe);
1146 if (err)
1147 goto out;
1148
Andi Kleen373565d2017-11-17 13:42:59 -08001149 err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->threads,
1150 process_synthesized_event,
1151 NULL);
1152 if (err < 0) {
1153 pr_err("Couldn't synthesize thread map.\n");
1154 return err;
1155 }
1156
1157 err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->cpus,
1158 process_synthesized_event, NULL);
1159 if (err < 0) {
1160 pr_err("Couldn't synthesize cpu map.\n");
1161 return err;
1162 }
1163
Song Liue5416952019-03-11 22:30:41 -07001164 err = perf_event__synthesize_bpf_events(session, process_synthesized_event,
Song Liu7b612e22019-01-17 08:15:19 -08001165 machine, opts);
1166 if (err < 0)
1167 pr_warning("Couldn't synthesize bpf events.\n");
1168
Wang Nanc45c86e2016-02-26 09:32:07 +00001169 err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
1170 process_synthesized_event, opts->sample_address,
Mark Drayton3fcb10e2018-12-04 12:34:20 -08001171 1);
Wang Nanc45c86e2016-02-26 09:32:07 +00001172out:
1173 return err;
1174}
1175
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001176static int __cmd_record(struct record *rec, int argc, const char **argv)
Peter Zijlstra16c8a102009-05-05 17:50:27 +02001177{
David Ahern57706ab2013-11-06 11:41:34 -07001178 int err;
Namhyung Kim45604712014-05-12 09:47:24 +09001179 int status = 0;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001180 unsigned long waking = 0;
Zhang, Yanmin46be6042010-03-18 11:36:04 -03001181 const bool forks = argc > 0;
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02001182 struct perf_tool *tool = &rec->tool;
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03001183 struct record_opts *opts = &rec->opts;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001184 struct perf_data *data = &rec->data;
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001185 struct perf_session *session;
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001186 bool disabled = false, draining = false;
Song Liu657ee552019-03-11 22:30:50 -07001187 struct perf_evlist *sb_evlist = NULL;
Namhyung Kim42aa2762015-01-29 17:06:48 +09001188 int fd;
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001189 float ratio = 0;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001190
Namhyung Kim45604712014-05-12 09:47:24 +09001191 atexit(record__sig_exit);
Peter Zijlstraf5970552009-06-18 23:22:55 +02001192 signal(SIGCHLD, sig_handler);
1193 signal(SIGINT, sig_handler);
David Ahern804f7ac2013-05-06 12:24:23 -06001194 signal(SIGTERM, sig_handler);
Wang Nana0748652016-11-26 07:03:28 +00001195 signal(SIGSEGV, sigsegv_handler);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00001196
Hari Bathinif3b36142017-03-08 02:11:43 +05301197 if (rec->opts.record_namespaces)
1198 tool->namespace_events = true;
1199
Jiri Olsadc0c6122017-01-09 10:51:58 +01001200 if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001201 signal(SIGUSR2, snapshot_sig_handler);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001202 if (rec->opts.auxtrace_snapshot_mode)
1203 trigger_on(&auxtrace_snapshot_trigger);
Jiri Olsadc0c6122017-01-09 10:51:58 +01001204 if (rec->switch_output.enabled)
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001205 trigger_on(&switch_output_trigger);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00001206 } else {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001207 signal(SIGUSR2, SIG_IGN);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00001208 }
Peter Zijlstraf5970552009-06-18 23:22:55 +02001209
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001210 session = perf_session__new(data, false, tool);
Arnaldo Carvalho de Melo94c744b2009-12-11 21:24:02 -02001211 if (session == NULL) {
Adrien BAKffa91882014-04-18 11:00:43 +09001212 pr_err("Perf session creation failed.\n");
Arnaldo Carvalho de Meloa9a70bb2009-11-17 01:18:11 -02001213 return -1;
1214 }
1215
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001216 fd = perf_data__fd(data);
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001217 rec->session = session;
1218
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001219 record__init_features(rec);
Stephane Eranian330aa672012-03-08 23:47:46 +01001220
Alexey Budankovcf790512018-10-09 17:36:24 +03001221 if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
1222 session->header.env.clockid_res_ns = rec->opts.clockid_res_ns;
1223
Arnaldo Carvalho de Melod4db3f12009-12-27 21:36:57 -02001224 if (forks) {
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001225 err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001226 argv, data->is_pipe,
Arnaldo Carvalho de Melo735f7e02014-01-03 14:56:49 -03001227 workload_exec_failed_signal);
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001228 if (err < 0) {
1229 pr_err("Couldn't run the workload!\n");
Namhyung Kim45604712014-05-12 09:47:24 +09001230 status = err;
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001231 goto out_delete_session;
Jens Axboe0a5ac842009-08-12 11:18:01 +02001232 }
Peter Zijlstra856e9662009-12-16 17:55:55 +01001233 }
1234
Jiri Olsaad46e48c2018-03-02 17:13:54 +01001235 /*
1236 * If we have just single event and are sending data
1237 * through pipe, we need to force the ids allocation,
1238 * because we synthesize event name through the pipe
1239 * and need the id for that.
1240 */
1241 if (data->is_pipe && rec->evlist->nr_entries == 1)
1242 rec->opts.sample_id = true;
1243
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001244 if (record__open(rec) != 0) {
David Ahern8d3eca22012-08-26 12:24:47 -06001245 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001246 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001247 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001248
Wang Nan8690a2a2016-02-22 09:10:32 +00001249 err = bpf__apply_obj_config();
1250 if (err) {
1251 char errbuf[BUFSIZ];
1252
1253 bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
1254 pr_err("ERROR: Apply config to BPF failed: %s\n",
1255 errbuf);
1256 goto out_child;
1257 }
1258
Adrian Huntercca84822015-08-19 17:29:21 +03001259 /*
1260 * Normally perf_session__new would do this, but it doesn't have the
1261 * evlist.
1262 */
1263 if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
1264 pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
1265 rec->tool.ordered_events = false;
1266 }
1267
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001268 if (!rec->evlist->nr_groups)
Namhyung Kima8bb5592013-01-22 18:09:31 +09001269 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
1270
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001271 if (data->is_pipe) {
Namhyung Kim42aa2762015-01-29 17:06:48 +09001272 err = perf_header__write_pipe(fd);
Tom Zanussi529870e2010-04-01 23:59:16 -05001273 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001274 goto out_child;
Jiri Olsa563aecb2013-06-05 13:35:06 +02001275 } else {
Namhyung Kim42aa2762015-01-29 17:06:48 +09001276 err = perf_session__write_header(session, rec->evlist, fd, false);
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02001277 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001278 goto out_child;
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02001279 }
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02001280
David Ahernd3665492012-02-06 15:27:52 -07001281 if (!rec->no_buildid
Robert Richtere20960c2011-12-07 10:02:55 +01001282 && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
David Ahernd3665492012-02-06 15:27:52 -07001283 pr_err("Couldn't generate buildids. "
Robert Richtere20960c2011-12-07 10:02:55 +01001284 "Use --no-buildid to profile anyway.\n");
David Ahern8d3eca22012-08-26 12:24:47 -06001285 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001286 goto out_child;
Robert Richtere20960c2011-12-07 10:02:55 +01001287 }
1288
Song Liud56354d2019-03-11 22:30:51 -07001289 if (!opts->no_bpf_event)
1290 bpf_event__add_sb_event(&sb_evlist, &session->header.env);
1291
Song Liu657ee552019-03-11 22:30:50 -07001292 if (perf_evlist__start_sb_thread(sb_evlist, &rec->opts.target)) {
1293 pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
1294 opts->no_bpf_event = true;
1295 }
1296
Wang Nan4ea648a2016-07-14 08:34:47 +00001297 err = record__synthesize(rec, false);
Wang Nanc45c86e2016-02-26 09:32:07 +00001298 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001299 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001300
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001301 if (rec->realtime_prio) {
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001302 struct sched_param param;
1303
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001304 param.sched_priority = rec->realtime_prio;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001305 if (sched_setscheduler(0, SCHED_FIFO, &param)) {
Arnaldo Carvalho de Melo6beba7a2009-10-21 17:34:06 -02001306 pr_err("Could not set realtime priority.\n");
David Ahern8d3eca22012-08-26 12:24:47 -06001307 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001308 goto out_child;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001309 }
1310 }
1311
Jiri Olsa774cb492012-11-12 18:34:01 +01001312 /*
1313 * When perf is starting the traced process, all the events
1314 * (apart from group members) have enable_on_exec=1 set,
1315 * so don't spoil it by prematurely enabling them.
1316 */
Andi Kleen6619a532014-01-11 13:38:27 -08001317 if (!target__none(&opts->target) && !opts->initial_delay)
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001318 perf_evlist__enable(rec->evlist);
David Ahern764e16a32011-08-25 10:17:55 -06001319
Peter Zijlstra856e9662009-12-16 17:55:55 +01001320 /*
1321 * Let the child rip
1322 */
Namhyung Kime803cf92015-09-22 09:24:55 +09001323 if (forks) {
Jiri Olsa20a8a3c2018-03-07 16:50:04 +01001324 struct machine *machine = &session->machines.host;
Namhyung Kime5bed562015-09-30 10:45:24 +09001325 union perf_event *event;
Hari Bathinie907caf2017-03-08 02:11:51 +05301326 pid_t tgid;
Namhyung Kime5bed562015-09-30 10:45:24 +09001327
1328 event = malloc(sizeof(event->comm) + machine->id_hdr_size);
1329 if (event == NULL) {
1330 err = -ENOMEM;
1331 goto out_child;
1332 }
1333
Namhyung Kime803cf92015-09-22 09:24:55 +09001334 /*
1335 * Some H/W events are generated before COMM event
1336 * which is emitted during exec(), so perf script
1337 * cannot see a correct process name for those events.
1338 * Synthesize COMM event to prevent it.
1339 */
Hari Bathinie907caf2017-03-08 02:11:51 +05301340 tgid = perf_event__synthesize_comm(tool, event,
1341 rec->evlist->workload.pid,
1342 process_synthesized_event,
1343 machine);
1344 free(event);
1345
1346 if (tgid == -1)
1347 goto out_child;
1348
1349 event = malloc(sizeof(event->namespaces) +
1350 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
1351 machine->id_hdr_size);
1352 if (event == NULL) {
1353 err = -ENOMEM;
1354 goto out_child;
1355 }
1356
1357 /*
1358 * Synthesize NAMESPACES event for the command specified.
1359 */
1360 perf_event__synthesize_namespaces(tool, event,
1361 rec->evlist->workload.pid,
1362 tgid, process_synthesized_event,
1363 machine);
Namhyung Kime5bed562015-09-30 10:45:24 +09001364 free(event);
Namhyung Kime803cf92015-09-22 09:24:55 +09001365
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001366 perf_evlist__start_workload(rec->evlist);
Namhyung Kime803cf92015-09-22 09:24:55 +09001367 }
Peter Zijlstra856e9662009-12-16 17:55:55 +01001368
Andi Kleen6619a532014-01-11 13:38:27 -08001369 if (opts->initial_delay) {
Arnaldo Carvalho de Melo0693e682016-08-08 15:05:46 -03001370 usleep(opts->initial_delay * USEC_PER_MSEC);
Andi Kleen6619a532014-01-11 13:38:27 -08001371 perf_evlist__enable(rec->evlist);
1372 }
1373
Wang Nan5f9cf592016-04-20 18:59:49 +00001374 trigger_ready(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001375 trigger_ready(&switch_output_trigger);
Wang Nana0748652016-11-26 07:03:28 +00001376 perf_hooks__invoke_record_start();
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001377 for (;;) {
Yang Shi9f065192015-09-29 14:49:43 -07001378 unsigned long long hits = rec->samples;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001379
Wang Nan057374642016-07-14 08:34:43 +00001380 /*
1381 * rec->evlist->bkw_mmap_state is possible to be
1382 * BKW_MMAP_EMPTY here: when done == true and
1383 * hits != rec->samples in previous round.
1384 *
1385 * perf_evlist__toggle_bkw_mmap ensure we never
1386 * convert BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
1387 */
1388 if (trigger_is_hit(&switch_output_trigger) || done || draining)
1389 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);
1390
Alexey Budankov470530b2019-03-18 20:40:26 +03001391 if (record__mmap_read_all(rec, false) < 0) {
Wang Nan5f9cf592016-04-20 18:59:49 +00001392 trigger_error(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001393 trigger_error(&switch_output_trigger);
David Ahern8d3eca22012-08-26 12:24:47 -06001394 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001395 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001396 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001397
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001398 if (auxtrace_record__snapshot_started) {
1399 auxtrace_record__snapshot_started = 0;
Wang Nan5f9cf592016-04-20 18:59:49 +00001400 if (!trigger_is_error(&auxtrace_snapshot_trigger))
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001401 record__read_auxtrace_snapshot(rec);
Wang Nan5f9cf592016-04-20 18:59:49 +00001402 if (trigger_is_error(&auxtrace_snapshot_trigger)) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001403 pr_err("AUX area tracing snapshot failed\n");
1404 err = -1;
1405 goto out_child;
1406 }
1407 }
1408
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001409 if (trigger_is_hit(&switch_output_trigger)) {
Wang Nan057374642016-07-14 08:34:43 +00001410 /*
1411 * If switch_output_trigger is hit, the data in
1412 * overwritable ring buffer should have been collected,
1413 * so bkw_mmap_state should be set to BKW_MMAP_EMPTY.
1414 *
1415 * If SIGUSR2 raise after or during record__mmap_read_all(),
1416 * record__mmap_read_all() didn't collect data from
1417 * overwritable ring buffer. Read again.
1418 */
1419 if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
1420 continue;
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001421 trigger_ready(&switch_output_trigger);
1422
Wang Nan057374642016-07-14 08:34:43 +00001423 /*
1424 * Reenable events in overwrite ring buffer after
1425 * record__mmap_read_all(): we should have collected
1426 * data from it.
1427 */
1428 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);
1429
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001430 if (!quiet)
1431 fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
1432 waking);
1433 waking = 0;
1434 fd = record__switch_output(rec, false);
1435 if (fd < 0) {
1436 pr_err("Failed to switch to new file\n");
1437 trigger_error(&switch_output_trigger);
1438 err = fd;
1439 goto out_child;
1440 }
Jiri Olsabfacbe32017-01-09 10:52:00 +01001441
1442 /* re-arm the alarm */
1443 if (rec->switch_output.time)
1444 alarm(rec->switch_output.time);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001445 }
1446
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001447 if (hits == rec->samples) {
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001448 if (done || draining)
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001449 break;
Arnaldo Carvalho de Melof66a8892014-08-18 17:25:59 -03001450 err = perf_evlist__poll(rec->evlist, -1);
Jiri Olsaa5151142014-06-02 13:44:23 -04001451 /*
1452 * Propagate error, only if there's any. Ignore positive
1453 * number of returned events and interrupt error.
1454 */
1455 if (err > 0 || (err < 0 && errno == EINTR))
Namhyung Kim45604712014-05-12 09:47:24 +09001456 err = 0;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001457 waking++;
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001458
1459 if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
1460 draining = true;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001461 }
1462
Jiri Olsa774cb492012-11-12 18:34:01 +01001463 /*
1464 * When perf is starting the traced process, at the end events
1465 * die with the process and we wait for that. Thus no need to
1466 * disable events in this case.
1467 */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001468 if (done && !disabled && !target__none(&opts->target)) {
Wang Nan5f9cf592016-04-20 18:59:49 +00001469 trigger_off(&auxtrace_snapshot_trigger);
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001470 perf_evlist__disable(rec->evlist);
Jiri Olsa27119262012-11-12 18:34:02 +01001471 disabled = true;
1472 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001473 }
Wang Nan5f9cf592016-04-20 18:59:49 +00001474 trigger_off(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001475 trigger_off(&switch_output_trigger);
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001476
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001477 if (forks && workload_exec_errno) {
Masami Hiramatsu35550da2014-08-14 02:22:43 +00001478 char msg[STRERR_BUFSIZE];
Arnaldo Carvalho de Meloc8b5f2c2016-07-06 11:56:20 -03001479 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001480 pr_err("Workload failed: %s\n", emsg);
1481 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001482 goto out_child;
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001483 }
1484
Namhyung Kime3d59112015-01-29 17:06:44 +09001485 if (!quiet)
Namhyung Kim45604712014-05-12 09:47:24 +09001486 fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02001487
Wang Nan4ea648a2016-07-14 08:34:47 +00001488 if (target__none(&rec->opts.target))
1489 record__synthesize_workload(rec, true);
1490
Namhyung Kim45604712014-05-12 09:47:24 +09001491out_child:
Alexey Budankov470530b2019-03-18 20:40:26 +03001492 record__mmap_read_all(rec, true);
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001493 record__aio_mmap_read_sync(rec);
1494
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001495 if (rec->session->bytes_transferred && rec->session->bytes_compressed) {
1496 ratio = (float)rec->session->bytes_transferred/(float)rec->session->bytes_compressed;
1497 session->header.env.comp_ratio = ratio + 0.5;
1498 }
1499
Namhyung Kim45604712014-05-12 09:47:24 +09001500 if (forks) {
1501 int exit_status;
Ingo Molnaraddc2782009-06-02 23:43:11 +02001502
Namhyung Kim45604712014-05-12 09:47:24 +09001503 if (!child_finished)
1504 kill(rec->evlist->workload.pid, SIGTERM);
1505
1506 wait(&exit_status);
1507
1508 if (err < 0)
1509 status = err;
1510 else if (WIFEXITED(exit_status))
1511 status = WEXITSTATUS(exit_status);
1512 else if (WIFSIGNALED(exit_status))
1513 signr = WTERMSIG(exit_status);
1514 } else
1515 status = err;
1516
Wang Nan4ea648a2016-07-14 08:34:47 +00001517 record__synthesize(rec, true);
Namhyung Kime3d59112015-01-29 17:06:44 +09001518 /* this will be recalculated during process_buildids() */
1519 rec->samples = 0;
1520
Wang Nanecfd7a92016-04-13 08:21:07 +00001521 if (!err) {
1522 if (!rec->timestamp_filename) {
1523 record__finish_output(rec);
1524 } else {
1525 fd = record__switch_output(rec, true);
1526 if (fd < 0) {
1527 status = fd;
1528 goto out_delete_session;
1529 }
1530 }
1531 }
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001532
Wang Nana0748652016-11-26 07:03:28 +00001533 perf_hooks__invoke_record_end();
1534
Namhyung Kime3d59112015-01-29 17:06:44 +09001535 if (!err && !quiet) {
1536 char samples[128];
Wang Nanecfd7a92016-04-13 08:21:07 +00001537 const char *postfix = rec->timestamp_filename ?
1538 ".<timestamp>" : "";
Namhyung Kime3d59112015-01-29 17:06:44 +09001539
Adrian Hunteref149c22015-04-09 18:53:45 +03001540 if (rec->samples && !rec->opts.full_auxtrace)
Namhyung Kime3d59112015-01-29 17:06:44 +09001541 scnprintf(samples, sizeof(samples),
1542 " (%" PRIu64 " samples)", rec->samples);
1543 else
1544 samples[0] = '\0';
1545
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001546 fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s",
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001547 perf_data__size(data) / 1024.0 / 1024.0,
Jiri Olsa2d4f2792019-02-21 10:41:30 +01001548 data->path, postfix, samples);
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001549 if (ratio) {
1550 fprintf(stderr, ", compressed (original %.3f MB, ratio is %.3f)",
1551 rec->session->bytes_transferred / 1024.0 / 1024.0,
1552 ratio);
1553 }
1554 fprintf(stderr, " ]\n");
Namhyung Kime3d59112015-01-29 17:06:44 +09001555 }
1556
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001557out_delete_session:
1558 perf_session__delete(session);
Song Liu657ee552019-03-11 22:30:50 -07001559
1560 if (!opts->no_bpf_event)
1561 perf_evlist__stop_sb_thread(sb_evlist);
Namhyung Kim45604712014-05-12 09:47:24 +09001562 return status;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001563}
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001564
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001565static void callchain_debug(struct callchain_param *callchain)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001566{
Kan Liangaad2b212015-01-05 13:23:04 -05001567 static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
Jiri Olsaa601fdf2014-02-03 12:44:43 +01001568
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001569 pr_debug("callchain: type %s\n", str[callchain->record_mode]);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001570
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001571 if (callchain->record_mode == CALLCHAIN_DWARF)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001572 pr_debug("callchain: stack dump size %d\n",
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001573 callchain->dump_size);
1574}
1575
1576int record_opts__parse_callchain(struct record_opts *record,
1577 struct callchain_param *callchain,
1578 const char *arg, bool unset)
1579{
1580 int ret;
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001581 callchain->enabled = !unset;
1582
1583 /* --no-call-graph */
1584 if (unset) {
1585 callchain->record_mode = CALLCHAIN_NONE;
1586 pr_debug("callchain: disabled\n");
1587 return 0;
1588 }
1589
1590 ret = parse_callchain_record_opt(arg, callchain);
1591 if (!ret) {
1592 /* Enable data address sampling for DWARF unwind. */
1593 if (callchain->record_mode == CALLCHAIN_DWARF)
1594 record->sample_address = true;
1595 callchain_debug(callchain);
1596 }
1597
1598 return ret;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001599}
1600
Kan Liangc421e802015-07-29 05:42:12 -04001601int record_parse_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001602 const char *arg,
1603 int unset)
1604{
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001605 return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
Jiri Olsa26d33022012-08-07 15:20:47 +02001606}
1607
Kan Liangc421e802015-07-29 05:42:12 -04001608int record_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001609 const char *arg __maybe_unused,
1610 int unset __maybe_unused)
1611{
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001612 struct callchain_param *callchain = opt->value;
Kan Liangc421e802015-07-29 05:42:12 -04001613
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001614 callchain->enabled = true;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001615
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001616 if (callchain->record_mode == CALLCHAIN_NONE)
1617 callchain->record_mode = CALLCHAIN_FP;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001618
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001619 callchain_debug(callchain);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001620 return 0;
1621}
1622
Jiri Olsaeb853e82014-02-03 12:44:42 +01001623static int perf_record_config(const char *var, const char *value, void *cb)
1624{
Namhyung Kim7a29c082015-12-15 10:49:56 +09001625 struct record *rec = cb;
1626
1627 if (!strcmp(var, "record.build-id")) {
1628 if (!strcmp(value, "cache"))
1629 rec->no_buildid_cache = false;
1630 else if (!strcmp(value, "no-cache"))
1631 rec->no_buildid_cache = true;
1632 else if (!strcmp(value, "skip"))
1633 rec->no_buildid = true;
1634 else
1635 return -1;
1636 return 0;
1637 }
Yisheng Xiecff17202018-03-12 19:25:57 +08001638 if (!strcmp(var, "record.call-graph")) {
1639 var = "call-graph.record-mode";
1640 return perf_default_config(var, value, cb);
1641 }
Alexey Budankov93f20c02018-11-06 12:07:19 +03001642#ifdef HAVE_AIO_SUPPORT
1643 if (!strcmp(var, "record.aio")) {
1644 rec->opts.nr_cblocks = strtol(value, NULL, 0);
1645 if (!rec->opts.nr_cblocks)
1646 rec->opts.nr_cblocks = nr_cblocks_default;
1647 }
1648#endif
Jiri Olsaeb853e82014-02-03 12:44:42 +01001649
Yisheng Xiecff17202018-03-12 19:25:57 +08001650 return 0;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001651}
1652
/* Maps a user-facing clock name to its clockid_t value, see clockids[]. */
struct clockid_map {
	const char *name;
	int clockid;
};

/* Table-entry helper for the clockids[] array below. */
#define CLOCKID_MAP(n, c)	\
	{ .name = n, .clockid = (c), }

/* Sentinel terminating the clockids[] table. */
#define CLOCKID_END	{ .name = NULL, }


/*
 * Add the missing ones, we need to build on many distros...
 */
#ifndef CLOCK_MONOTONIC_RAW
#define CLOCK_MONOTONIC_RAW 4
#endif
#ifndef CLOCK_BOOTTIME
#define CLOCK_BOOTTIME 7
#endif
#ifndef CLOCK_TAI
#define CLOCK_TAI 11
#endif

/* Clock names accepted by -k/--clockid, consumed by parse_clockid(). */
static const struct clockid_map clockids[] = {
	/* available for all events, NMI safe */
	CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
	CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),

	/* available for some events */
	CLOCKID_MAP("realtime", CLOCK_REALTIME),
	CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
	CLOCKID_MAP("tai", CLOCK_TAI),

	/* available for the lazy */
	CLOCKID_MAP("mono", CLOCK_MONOTONIC),
	CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
	CLOCKID_MAP("real", CLOCK_REALTIME),
	CLOCKID_MAP("boot", CLOCK_BOOTTIME),

	CLOCKID_END,
};
1695
Alexey Budankovcf790512018-10-09 17:36:24 +03001696static int get_clockid_res(clockid_t clk_id, u64 *res_ns)
1697{
1698 struct timespec res;
1699
1700 *res_ns = 0;
1701 if (!clock_getres(clk_id, &res))
1702 *res_ns = res.tv_nsec + res.tv_sec * NSEC_PER_SEC;
1703 else
1704 pr_warning("WARNING: Failed to determine specified clock resolution.\n");
1705
1706 return 0;
1707}
1708
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001709static int parse_clockid(const struct option *opt, const char *str, int unset)
1710{
1711 struct record_opts *opts = (struct record_opts *)opt->value;
1712 const struct clockid_map *cm;
1713 const char *ostr = str;
1714
1715 if (unset) {
1716 opts->use_clockid = 0;
1717 return 0;
1718 }
1719
1720 /* no arg passed */
1721 if (!str)
1722 return 0;
1723
1724 /* no setting it twice */
1725 if (opts->use_clockid)
1726 return -1;
1727
1728 opts->use_clockid = true;
1729
1730 /* if its a number, we're done */
1731 if (sscanf(str, "%d", &opts->clockid) == 1)
Alexey Budankovcf790512018-10-09 17:36:24 +03001732 return get_clockid_res(opts->clockid, &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001733
1734 /* allow a "CLOCK_" prefix to the name */
1735 if (!strncasecmp(str, "CLOCK_", 6))
1736 str += 6;
1737
1738 for (cm = clockids; cm->name; cm++) {
1739 if (!strcasecmp(str, cm->name)) {
1740 opts->clockid = cm->clockid;
Alexey Budankovcf790512018-10-09 17:36:24 +03001741 return get_clockid_res(opts->clockid,
1742 &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001743 }
1744 }
1745
1746 opts->use_clockid = false;
1747 ui__warning("unknown clockid %s, check man page\n", ostr);
1748 return -1;
1749}
1750
Alexey Budankovf4fe11b2019-01-22 20:52:03 +03001751static int record__parse_affinity(const struct option *opt, const char *str, int unset)
1752{
1753 struct record_opts *opts = (struct record_opts *)opt->value;
1754
1755 if (unset || !str)
1756 return 0;
1757
1758 if (!strcasecmp(str, "node"))
1759 opts->affinity = PERF_AFFINITY_NODE;
1760 else if (!strcasecmp(str, "cpu"))
1761 opts->affinity = PERF_AFFINITY_CPU;
1762
1763 return 0;
1764}
1765
Adrian Huntere9db1312015-04-09 18:53:46 +03001766static int record__parse_mmap_pages(const struct option *opt,
1767 const char *str,
1768 int unset __maybe_unused)
1769{
1770 struct record_opts *opts = opt->value;
1771 char *s, *p;
1772 unsigned int mmap_pages;
1773 int ret;
1774
1775 if (!str)
1776 return -EINVAL;
1777
1778 s = strdup(str);
1779 if (!s)
1780 return -ENOMEM;
1781
1782 p = strchr(s, ',');
1783 if (p)
1784 *p = '\0';
1785
1786 if (*s) {
1787 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
1788 if (ret)
1789 goto out_free;
1790 opts->mmap_pages = mmap_pages;
1791 }
1792
1793 if (!p) {
1794 ret = 0;
1795 goto out_free;
1796 }
1797
1798 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
1799 if (ret)
1800 goto out_free;
1801
1802 opts->auxtrace_mmap_pages = mmap_pages;
1803
1804out_free:
1805 free(s);
1806 return ret;
1807}
1808
Jiri Olsa0c582442017-01-09 10:51:59 +01001809static void switch_output_size_warn(struct record *rec)
1810{
1811 u64 wakeup_size = perf_evlist__mmap_size(rec->opts.mmap_pages);
1812 struct switch_output *s = &rec->switch_output;
1813
1814 wakeup_size /= 2;
1815
1816 if (s->size < wakeup_size) {
1817 char buf[100];
1818
1819 unit_number__scnprintf(buf, sizeof(buf), wakeup_size);
1820 pr_warning("WARNING: switch-output data size lower than "
1821 "wakeup kernel buffer size (%s) "
1822 "expect bigger perf.data sizes\n", buf);
1823 }
1824}
1825
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001826static int switch_output_setup(struct record *rec)
1827{
1828 struct switch_output *s = &rec->switch_output;
Jiri Olsadc0c6122017-01-09 10:51:58 +01001829 static struct parse_tag tags_size[] = {
1830 { .tag = 'B', .mult = 1 },
1831 { .tag = 'K', .mult = 1 << 10 },
1832 { .tag = 'M', .mult = 1 << 20 },
1833 { .tag = 'G', .mult = 1 << 30 },
1834 { .tag = 0 },
1835 };
Jiri Olsabfacbe32017-01-09 10:52:00 +01001836 static struct parse_tag tags_time[] = {
1837 { .tag = 's', .mult = 1 },
1838 { .tag = 'm', .mult = 60 },
1839 { .tag = 'h', .mult = 60*60 },
1840 { .tag = 'd', .mult = 60*60*24 },
1841 { .tag = 0 },
1842 };
Jiri Olsadc0c6122017-01-09 10:51:58 +01001843 unsigned long val;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001844
1845 if (!s->set)
1846 return 0;
1847
1848 if (!strcmp(s->str, "signal")) {
1849 s->signal = true;
1850 pr_debug("switch-output with SIGUSR2 signal\n");
Jiri Olsadc0c6122017-01-09 10:51:58 +01001851 goto enabled;
1852 }
1853
1854 val = parse_tag_value(s->str, tags_size);
1855 if (val != (unsigned long) -1) {
1856 s->size = val;
1857 pr_debug("switch-output with %s size threshold\n", s->str);
1858 goto enabled;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001859 }
1860
Jiri Olsabfacbe32017-01-09 10:52:00 +01001861 val = parse_tag_value(s->str, tags_time);
1862 if (val != (unsigned long) -1) {
1863 s->time = val;
1864 pr_debug("switch-output with %s time threshold (%lu seconds)\n",
1865 s->str, s->time);
1866 goto enabled;
1867 }
1868
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001869 return -1;
Jiri Olsadc0c6122017-01-09 10:51:58 +01001870
1871enabled:
1872 rec->timestamp_filename = true;
1873 s->enabled = true;
Jiri Olsa0c582442017-01-09 10:51:59 +01001874
1875 if (s->size && !rec->opts.no_buffering)
1876 switch_output_size_warn(rec);
1877
Jiri Olsadc0c6122017-01-09 10:51:58 +01001878 return 0;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001879}
1880
/* Usage lines shown by 'perf record -h'; exported via record_usage. */
static const char * const __record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};
const char * const *record_usage = __record_usage;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001887
/*
 * XXX Ideally would be local to cmd_record() and passed to a record__new
 * because we need to have access to it in record__exit, that is called
 * after cmd_record() exits, but since record_options need to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't ouch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct record record = {
	.opts = {
		.sample_time	     = true,
		/*
		 * NOTE(review): UINT_MAX/ULLONG_MAX look like "not set by
		 * the user" sentinels resolved later — confirm in
		 * record_opts handling.
		 */
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
			.default_per_cpu = true,
		},
		.mmap_flush          = MMAP_FLUSH_DEFAULT,
	},
	/* Callbacks used when this tool processes recorded events. */
	.tool = {
		.sample		= process_sample_event,
		.fork		= perf_event__process_fork,
		.exit		= perf_event__process_exit,
		.comm		= perf_event__process_comm,
		.namespaces	= perf_event__process_namespaces,
		.mmap		= perf_event__process_mmap,
		.mmap2		= perf_event__process_mmap2,
		.ordered_events	= true,
	},
};
Frederic Weisbecker7865e812010-04-14 19:42:07 +02001922
/* Help text for --call-graph, shared CALLCHAIN help plus the default. */
const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
	"\n\t\t\t\tDefault: fp";

/* Set by --dry-run: parse the options, then exit without recording. */
static bool dry_run;
1927
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001928/*
1929 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
1930 * with it and switch to use the library functions in perf_evlist that came
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03001931 * from builtin-record.c, i.e. use record_opts,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001932 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
1933 * using pipes, etc.
1934 */
Jiri Olsaefd21302017-01-03 09:19:55 +01001935static struct option __record_options[] = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001936 OPT_CALLBACK('e', "event", &record.evlist, "event",
Thomas Gleixner86847b62009-06-06 12:24:17 +02001937 "event selector. use 'perf list' to list available events",
Jiri Olsaf120f9d2011-07-14 11:25:32 +02001938 parse_events_option),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001939 OPT_CALLBACK(0, "filter", &record.evlist, "filter",
Li Zefanc171b552009-10-15 11:22:07 +08001940 "event filter", parse_filter),
Wang Nan4ba1faa2015-07-10 07:36:10 +00001941 OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
1942 NULL, "don't record events from perf itself",
1943 exclude_perf),
Namhyung Kimbea03402012-04-26 14:15:15 +09001944 OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03001945 "record events on existing process id"),
Namhyung Kimbea03402012-04-26 14:15:15 +09001946 OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03001947 "record events on existing thread id"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001948 OPT_INTEGER('r', "realtime", &record.realtime_prio,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001949 "collect data with this RT SCHED_FIFO priority"),
Arnaldo Carvalho de Melo509051e2014-01-14 17:52:14 -03001950 OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
Kirill Smelkovacac03f2011-01-12 17:59:36 +03001951 "collect data without buffering"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001952 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
Frederic Weisbeckerdaac07b2009-08-13 10:27:19 +02001953 "collect raw sample records from all opened counters"),
Namhyung Kimbea03402012-04-26 14:15:15 +09001954 OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001955 "system-wide collection from all CPUs"),
Namhyung Kimbea03402012-04-26 14:15:15 +09001956 OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
Stephane Eranianc45c6ea2010-05-28 12:00:01 +02001957 "list of cpus to monitor"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001958 OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
Jiri Olsa2d4f2792019-02-21 10:41:30 +01001959 OPT_STRING('o', "output", &record.data.path, "file",
Ingo Molnarabaff322009-06-02 22:59:57 +02001960 "output file name"),
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02001961 OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
1962 &record.opts.no_inherit_set,
1963 "child tasks do not inherit counters"),
Wang Nan4ea648a2016-07-14 08:34:47 +00001964 OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
1965 "synthesize non-sample events at the end of output"),
Wang Nan626a6b72016-07-14 08:34:45 +00001966 OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
Song Liu71184c62019-03-11 22:30:37 -07001967 OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "record bpf events"),
Arnaldo Carvalho de Melob09c2362018-03-01 14:52:50 -03001968 OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
1969 "Fail if the specified frequency can't be used"),
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03001970 OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
1971 "profile at this frequency",
1972 record__parse_freq),
Adrian Huntere9db1312015-04-09 18:53:46 +03001973 OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
1974 "number of mmap data pages and AUX area tracing mmap pages",
1975 record__parse_mmap_pages),
Alexey Budankov470530b2019-03-18 20:40:26 +03001976 OPT_CALLBACK(0, "mmap-flush", &record.opts, "number",
1977 "Minimal number of bytes that is extracted from mmap data pages (default: 1)",
1978 record__mmap_flush_parse),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001979 OPT_BOOLEAN(0, "group", &record.opts.group,
Lin Ming43bece72011-08-17 18:42:07 +08001980 "put the counters into a counter group"),
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001981 OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001982 NULL, "enables call-graph recording" ,
1983 &record_callchain_opt),
1984 OPT_CALLBACK(0, "call-graph", &record.opts,
Namhyung Kim76a26542015-10-22 23:28:32 +09001985 "record_mode[,record_size]", record_callchain_help,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001986 &record_parse_callchain_opt),
Ian Munsiec0555642010-04-13 18:37:33 +10001987 OPT_INCR('v', "verbose", &verbose,
Ingo Molnar3da297a2009-06-07 17:39:02 +02001988 "be more verbose (show counter open errors, etc)"),
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02001989 OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001990 OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001991 "per thread counts"),
Peter Zijlstra56100322015-06-10 16:48:50 +02001992 OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
Kan Liang3b0a5da2017-08-29 13:11:08 -04001993 OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
1994 "Record the sample physical addresses"),
Jiri Olsab6f35ed2016-08-01 20:02:35 +02001995 OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
Adrian Hunter3abebc52015-07-06 14:51:01 +03001996 OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
1997 &record.opts.sample_time_set,
1998 "Record the sample timestamps"),
Jiri Olsaf290aa12018-02-01 09:38:11 +01001999 OPT_BOOLEAN_SET('P', "period", &record.opts.period, &record.opts.period_set,
2000 "Record the sample period"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002001 OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02002002 "don't sample"),
Wang Nand2db9a92016-01-25 09:56:19 +00002003 OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
2004 &record.no_buildid_cache_set,
2005 "do not update the buildid cache"),
2006 OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
2007 &record.no_buildid_set,
2008 "do not collect buildids in perf.data"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002009 OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
Stephane Eranian023695d2011-02-14 11:20:01 +02002010 "monitor event in cgroup name only",
2011 parse_cgroups),
Arnaldo Carvalho de Meloa6205a32014-01-14 17:58:12 -03002012 OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
Andi Kleen6619a532014-01-11 13:38:27 -08002013 "ms to wait before starting measurement after program start"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002014 OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
2015 "user to profile"),
Stephane Eraniana5aabda2012-03-08 23:47:45 +01002016
2017 OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
2018 "branch any", "sample any taken branches",
2019 parse_branch_stack),
2020
2021 OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
2022 "branch filter mask", "branch stack filter modes",
Roberto Agostino Vitillobdfebd82012-02-09 23:21:02 +01002023 parse_branch_stack),
Andi Kleen05484292013-01-24 16:10:29 +01002024 OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
2025 "sample by weight (on special events only)"),
Andi Kleen475eeab2013-09-20 07:40:43 -07002026 OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
2027 "sample transaction flags (special events only)"),
Adrian Hunter3aa59392013-11-15 15:52:29 +02002028 OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
2029 "use per-thread mmaps"),
Stephane Eranianbcc84ec2015-08-31 18:41:12 +02002030 OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
2031 "sample selected machine registers on interrupt,"
Arnaldo Carvalho de Melo8e5bc762019-05-13 15:55:01 -03002032 " use '-I?' to list register names", parse_regs),
Andi Kleen84c41742017-09-05 10:00:28 -07002033 OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
2034 "sample selected machine registers on interrupt,"
Arnaldo Carvalho de Melo8e5bc762019-05-13 15:55:01 -03002035 " use '-I?' to list register names", parse_regs),
Andi Kleen85c273d2015-02-24 15:13:40 -08002036 OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
2037 "Record running/enabled time of read (:S) events"),
Peter Zijlstra814c8c32015-03-31 00:19:31 +02002038 OPT_CALLBACK('k', "clockid", &record.opts,
2039 "clockid", "clockid to use for events, see clock_gettime()",
2040 parse_clockid),
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002041 OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
2042 "opts", "AUX area tracing Snapshot Mode", ""),
Mark Drayton3fcb10e2018-12-04 12:34:20 -08002043 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
Kan Liang9d9cad72015-06-17 09:51:11 -04002044 "per thread proc mmap processing timeout in ms"),
Hari Bathinif3b36142017-03-08 02:11:43 +05302045 OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
2046 "Record namespaces events"),
Adrian Hunterb757bb02015-07-21 12:44:04 +03002047 OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
2048 "Record context switch events"),
Jiri Olsa85723882016-02-15 09:34:31 +01002049 OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
2050 "Configure all used events to run in kernel space.",
2051 PARSE_OPT_EXCLUSIVE),
2052 OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
2053 "Configure all used events to run in user space.",
2054 PARSE_OPT_EXCLUSIVE),
Wang Nan71dc23262015-10-14 12:41:19 +00002055 OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
2056 "clang binary to use for compiling BPF scriptlets"),
2057 OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
2058 "options passed to clang when compiling BPF scriptlets"),
He Kuang7efe0e02015-12-14 10:39:23 +00002059 OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
2060 "file", "vmlinux pathname"),
Namhyung Kim61566812016-01-11 22:37:09 +09002061 OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
2062 "Record build-id of all DSOs regardless of hits"),
Wang Nanecfd7a92016-04-13 08:21:07 +00002063 OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
2064 "append timestamp to output filename"),
Jin Yao68588ba2017-12-08 21:13:42 +08002065 OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
2066 "Record timestamp boundary (time of first/last samples)"),
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002067 OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
Andi Kleenc38dab72019-03-14 15:49:56 -07002068 &record.switch_output.set, "signal or size[BKMG] or time[smhd]",
2069 "Switch output when receiving SIGUSR2 (signal) or cross a size or time threshold",
Jiri Olsadc0c6122017-01-09 10:51:58 +01002070 "signal"),
Andi Kleen03724b22019-03-14 15:49:55 -07002071 OPT_INTEGER(0, "switch-max-files", &record.switch_output.num_files,
2072 "Limit number of switch output generated files"),
Wang Nan0aab2132016-06-16 08:02:41 +00002073 OPT_BOOLEAN(0, "dry-run", &dry_run,
2074 "Parse options then exit"),
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002075#ifdef HAVE_AIO_SUPPORT
Alexey Budankov93f20c02018-11-06 12:07:19 +03002076 OPT_CALLBACK_OPTARG(0, "aio", &record.opts,
2077 &nr_cblocks_default, "n", "Use <n> control blocks in asynchronous trace writing mode (default: 1, max: 4)",
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002078 record__aio_parse),
2079#endif
Alexey Budankovf4fe11b2019-01-22 20:52:03 +03002080 OPT_CALLBACK(0, "affinity", &record.opts, "node|cpu",
2081 "Set affinity mask of trace reading thread to NUMA node cpu mask or cpu of processed mmap buffer",
2082 record__parse_affinity),
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002083 OPT_END()
2084};
2085
/* Exported so builtin-script.c can reuse the same option table. */
struct option *record_options = __record_options;
2087
Arnaldo Carvalho de Melob0ad8ea2017-03-27 11:47:20 -03002088int cmd_record(int argc, const char **argv)
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002089{
Adrian Hunteref149c22015-04-09 18:53:45 +03002090 int err;
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03002091 struct record *rec = &record;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002092 char errbuf[BUFSIZ];
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002093
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03002094 setlocale(LC_ALL, "");
2095
Wang Nan48e1cab2015-12-14 10:39:22 +00002096#ifndef HAVE_LIBBPF_SUPPORT
2097# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
2098 set_nobuild('\0', "clang-path", true);
2099 set_nobuild('\0', "clang-opt", true);
2100# undef set_nobuild
2101#endif
2102
He Kuang7efe0e02015-12-14 10:39:23 +00002103#ifndef HAVE_BPF_PROLOGUE
2104# if !defined (HAVE_DWARF_SUPPORT)
2105# define REASON "NO_DWARF=1"
2106# elif !defined (HAVE_LIBBPF_SUPPORT)
2107# define REASON "NO_LIBBPF=1"
2108# else
2109# define REASON "this architecture doesn't support BPF prologue"
2110# endif
2111# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
2112 set_nobuild('\0', "vmlinux", true);
2113# undef set_nobuild
2114# undef REASON
2115#endif
2116
Alexey Budankov9d2ed642019-01-22 20:47:43 +03002117 CPU_ZERO(&rec->affinity_mask);
2118 rec->opts.affinity = PERF_AFFINITY_SYS;
2119
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03002120 rec->evlist = perf_evlist__new();
2121 if (rec->evlist == NULL)
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -02002122 return -ENOMEM;
2123
Arnaldo Carvalho de Meloecc4c562017-01-24 13:44:10 -03002124 err = perf_config(perf_record_config, rec);
2125 if (err)
2126 return err;
Jiri Olsaeb853e82014-02-03 12:44:42 +01002127
Tom Zanussibca647a2010-11-10 08:11:30 -06002128 argc = parse_options(argc, argv, record_options, record_usage,
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02002129 PARSE_OPT_STOP_AT_NON_OPTION);
Namhyung Kim68ba3232017-02-17 17:17:42 +09002130 if (quiet)
2131 perf_quiet_option();
Jiri Olsa483635a2017-02-17 18:00:18 +01002132
2133 /* Make system wide (-a) the default target. */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002134 if (!argc && target__none(&rec->opts.target))
Jiri Olsa483635a2017-02-17 18:00:18 +01002135 rec->opts.target.system_wide = true;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002136
Namhyung Kimbea03402012-04-26 14:15:15 +09002137 if (nr_cgroups && !rec->opts.target.system_wide) {
Namhyung Kimc7118362015-10-25 00:49:27 +09002138 usage_with_options_msg(record_usage, record_options,
2139 "cgroup monitoring only available in system-wide mode");
2140
Stephane Eranian023695d2011-02-14 11:20:01 +02002141 }
Adrian Hunterb757bb02015-07-21 12:44:04 +03002142 if (rec->opts.record_switch_events &&
2143 !perf_can_record_switch_events()) {
Namhyung Kimc7118362015-10-25 00:49:27 +09002144 ui__error("kernel does not support recording context switch events\n");
2145 parse_options_usage(record_usage, record_options, "switch-events", 0);
2146 return -EINVAL;
Adrian Hunterb757bb02015-07-21 12:44:04 +03002147 }
Stephane Eranian023695d2011-02-14 11:20:01 +02002148
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002149 if (switch_output_setup(rec)) {
2150 parse_options_usage(record_usage, record_options, "switch-output", 0);
2151 return -EINVAL;
2152 }
2153
Jiri Olsabfacbe32017-01-09 10:52:00 +01002154 if (rec->switch_output.time) {
2155 signal(SIGALRM, alarm_sig_handler);
2156 alarm(rec->switch_output.time);
2157 }
2158
Andi Kleen03724b22019-03-14 15:49:55 -07002159 if (rec->switch_output.num_files) {
2160 rec->switch_output.filenames = calloc(sizeof(char *),
2161 rec->switch_output.num_files);
2162 if (!rec->switch_output.filenames)
2163 return -EINVAL;
2164 }
2165
Adrian Hunter1b36c032016-09-23 17:38:39 +03002166 /*
2167 * Allow aliases to facilitate the lookup of symbols for address
2168 * filters. Refer to auxtrace_parse_filters().
2169 */
2170 symbol_conf.allow_aliases = true;
2171
2172 symbol__init(NULL);
2173
Adrian Hunter4b5ea3b2018-03-06 11:13:12 +02002174 err = record__auxtrace_init(rec);
Adrian Hunter1b36c032016-09-23 17:38:39 +03002175 if (err)
2176 goto out;
2177
Wang Nan0aab2132016-06-16 08:02:41 +00002178 if (dry_run)
Adrian Hunter5c01ad602016-09-23 17:38:37 +03002179 goto out;
Wang Nan0aab2132016-06-16 08:02:41 +00002180
Wang Nand7888572016-04-08 15:07:24 +00002181 err = bpf__setup_stdout(rec->evlist);
2182 if (err) {
2183 bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
2184 pr_err("ERROR: Setup BPF stdout failed: %s\n",
2185 errbuf);
Adrian Hunter5c01ad602016-09-23 17:38:37 +03002186 goto out;
Wang Nand7888572016-04-08 15:07:24 +00002187 }
2188
Adrian Hunteref149c22015-04-09 18:53:45 +03002189 err = -ENOMEM;
2190
Arnaldo Carvalho de Melo6c443952017-11-14 11:03:19 -03002191 if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(rec->evlist))
Arnaldo Carvalho de Melo646aaea2011-05-27 11:00:41 -03002192 pr_warning(
2193"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
2194"check /proc/sys/kernel/kptr_restrict.\n\n"
2195"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
2196"file is not found in the buildid cache or in the vmlinux path.\n\n"
2197"Samples in kernel modules won't be resolved at all.\n\n"
2198"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
2199"even with a suitable vmlinux or kallsyms file.\n\n");
Arnaldo Carvalho de Meloec80fde2011-05-26 09:53:51 -03002200
Wang Nan0c1d46a2016-04-20 18:59:52 +00002201 if (rec->no_buildid_cache || rec->no_buildid) {
Stephane Eraniana1ac1d32010-06-17 11:39:01 +02002202 disable_buildid_cache();
Jiri Olsadc0c6122017-01-09 10:51:58 +01002203 } else if (rec->switch_output.enabled) {
Wang Nan0c1d46a2016-04-20 18:59:52 +00002204 /*
2205 * In 'perf record --switch-output', disable buildid
2206 * generation by default to reduce data file switching
2207 * overhead. Still generate buildid if they are required
2208 * explicitly using
2209 *
Jiri Olsa60437ac2017-01-03 09:19:56 +01002210 * perf record --switch-output --no-no-buildid \
Wang Nan0c1d46a2016-04-20 18:59:52 +00002211 * --no-no-buildid-cache
2212 *
2213 * Following code equals to:
2214 *
2215 * if ((rec->no_buildid || !rec->no_buildid_set) &&
2216 * (rec->no_buildid_cache || !rec->no_buildid_cache_set))
2217 * disable_buildid_cache();
2218 */
2219 bool disable = true;
2220
2221 if (rec->no_buildid_set && !rec->no_buildid)
2222 disable = false;
2223 if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
2224 disable = false;
2225 if (disable) {
2226 rec->no_buildid = true;
2227 rec->no_buildid_cache = true;
2228 disable_buildid_cache();
2229 }
2230 }
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02002231
Wang Nan4ea648a2016-07-14 08:34:47 +00002232 if (record.opts.overwrite)
2233 record.opts.tail_synthesize = true;
2234
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03002235 if (rec->evlist->nr_entries == 0 &&
Arnaldo Carvalho de Melo4b4cd502017-07-03 13:26:32 -03002236 __perf_evlist__add_default(rec->evlist, !record.opts.no_samples) < 0) {
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02002237 pr_err("Not enough memory for event selector list\n");
Adrian Hunter394c01e2016-09-23 17:38:36 +03002238 goto out;
Peter Zijlstrabbd36e52009-06-11 23:11:50 +02002239 }
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002240
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02002241 if (rec->opts.target.tid && !rec->opts.no_inherit_set)
2242 rec->opts.no_inherit = true;
2243
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002244 err = target__validate(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002245 if (err) {
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002246 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Jiri Olsac3dec272018-02-06 19:17:58 +01002247 ui__warning("%s\n", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002248 }
Namhyung Kim4bd0f2d2012-04-26 14:15:18 +09002249
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002250 err = target__parse_uid(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002251 if (err) {
2252 int saved_errno = errno;
2253
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002254 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Namhyung Kim3780f482012-05-29 13:22:57 +09002255 ui__error("%s", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002256
2257 err = -saved_errno;
Adrian Hunter394c01e2016-09-23 17:38:36 +03002258 goto out;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002259 }
Arnaldo Carvalho de Melo0d37aa32012-01-19 14:08:15 -02002260
Mengting Zhangca800062017-12-13 15:01:53 +08002261 /* Enable ignoring missing threads when -u/-p option is defined. */
2262 rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;
Jiri Olsa23dc4f12016-12-12 11:35:43 +01002263
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002264 err = -ENOMEM;
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03002265 if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
Arnaldo Carvalho de Melodd7927f2011-01-12 14:28:51 -02002266 usage_with_options(record_usage, record_options);
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02002267
Adrian Hunteref149c22015-04-09 18:53:45 +03002268 err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
2269 if (err)
Adrian Hunter394c01e2016-09-23 17:38:36 +03002270 goto out;
Adrian Hunteref149c22015-04-09 18:53:45 +03002271
Namhyung Kim61566812016-01-11 22:37:09 +09002272 /*
2273 * We take all buildids when the file contains
2274 * AUX area tracing data because we do not decode the
2275 * trace because it would take too long.
2276 */
2277 if (rec->opts.full_auxtrace)
2278 rec->buildid_all = true;
2279
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03002280 if (record_opts__config(&rec->opts)) {
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03002281 err = -EINVAL;
Adrian Hunter394c01e2016-09-23 17:38:36 +03002282 goto out;
Mike Galbraith7e4ff9e2009-10-12 07:56:03 +02002283 }
2284
Alexey Budankov93f20c02018-11-06 12:07:19 +03002285 if (rec->opts.nr_cblocks > nr_cblocks_max)
2286 rec->opts.nr_cblocks = nr_cblocks_max;
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002287 if (verbose > 0)
2288 pr_info("nr_cblocks: %d\n", rec->opts.nr_cblocks);
2289
Alexey Budankov9d2ed642019-01-22 20:47:43 +03002290 pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
Alexey Budankov470530b2019-03-18 20:40:26 +03002291 pr_debug("mmap flush: %d\n", rec->opts.mmap_flush);
Alexey Budankov9d2ed642019-01-22 20:47:43 +03002292
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002293 err = __cmd_record(&record, argc, argv);
Adrian Hunter394c01e2016-09-23 17:38:36 +03002294out:
Namhyung Kim45604712014-05-12 09:47:24 +09002295 perf_evlist__delete(rec->evlist);
Arnaldo Carvalho de Melod65a4582010-07-30 18:31:28 -03002296 symbol__exit();
Adrian Hunteref149c22015-04-09 18:53:45 +03002297 auxtrace_record__free(rec->itr);
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03002298 return err;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002299}
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002300
2301static void snapshot_sig_handler(int sig __maybe_unused)
2302{
Jiri Olsadc0c6122017-01-09 10:51:58 +01002303 struct record *rec = &record;
2304
Wang Nan5f9cf592016-04-20 18:59:49 +00002305 if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
2306 trigger_hit(&auxtrace_snapshot_trigger);
2307 auxtrace_record__snapshot_started = 1;
2308 if (auxtrace_record__snapshot_start(record.itr))
2309 trigger_error(&auxtrace_snapshot_trigger);
2310 }
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002311
Jiri Olsadc0c6122017-01-09 10:51:58 +01002312 if (switch_output_signal(rec))
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002313 trigger_hit(&switch_output_trigger);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002314}
Jiri Olsabfacbe32017-01-09 10:52:00 +01002315
2316static void alarm_sig_handler(int sig __maybe_unused)
2317{
2318 struct record *rec = &record;
2319
2320 if (switch_output_time(rec))
2321 trigger_hit(&switch_output_trigger);
2322}