blob: de9632c69852851c8437a5abe7d21cf81ad7f72d [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Ingo Molnarabaff322009-06-02 22:59:57 +02002/*
Ingo Molnarbf9e1872009-06-02 23:37:05 +02003 * builtin-record.c
4 *
5 * Builtin record command: Record the profile of a workload
6 * (or a CPU, or a PID) into the perf.data output file - for
7 * later analysis via perf report.
Ingo Molnarabaff322009-06-02 22:59:57 +02008 */
Ingo Molnar16f762a2009-05-27 09:10:38 +02009#include "builtin.h"
Ingo Molnarbf9e1872009-06-02 23:37:05 +020010
11#include "perf.h"
12
Arnaldo Carvalho de Melo6122e4e2010-02-03 16:52:05 -020013#include "util/build-id.h"
Thomas Gleixner6eda5832009-05-01 18:29:57 +020014#include "util/util.h"
Josh Poimboeuf4b6ab942015-12-15 09:39:39 -060015#include <subcmd/parse-options.h>
Ingo Molnar8ad8db32009-05-26 11:10:09 +020016#include "util/parse-events.h"
Taeung Song41840d22016-06-23 17:55:17 +090017#include "util/config.h"
Thomas Gleixner6eda5832009-05-01 18:29:57 +020018
Arnaldo Carvalho de Melo8f651ea2014-10-09 16:12:24 -030019#include "util/callchain.h"
Arnaldo Carvalho de Melof14d5702014-10-17 12:17:40 -030020#include "util/cgroup.h"
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020021#include "util/header.h"
Frederic Weisbecker66e274f2009-08-12 11:07:25 +020022#include "util/event.h"
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -020023#include "util/evlist.h"
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -020024#include "util/evsel.h"
Frederic Weisbecker8f288272009-08-16 22:05:48 +020025#include "util/debug.h"
Arnaldo Carvalho de Melo94c744b2009-12-11 21:24:02 -020026#include "util/session.h"
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -020027#include "util/tool.h"
Arnaldo Carvalho de Melo8d063672009-11-04 18:50:43 -020028#include "util/symbol.h"
Paul Mackerrasa12b51c2010-03-10 20:36:09 +110029#include "util/cpumap.h"
Arnaldo Carvalho de Melofd782602011-01-18 15:15:24 -020030#include "util/thread_map.h"
Jiri Olsaf5fc14122013-10-15 16:27:32 +020031#include "util/data.h"
Stephane Eranianbcc84ec2015-08-31 18:41:12 +020032#include "util/perf_regs.h"
Adrian Hunteref149c22015-04-09 18:53:45 +030033#include "util/auxtrace.h"
Adrian Hunter46bc29b2016-03-08 10:38:44 +020034#include "util/tsc.h"
Andi Kleenf00898f2015-05-27 10:51:51 -070035#include "util/parse-branch-options.h"
Stephane Eranianbcc84ec2015-08-31 18:41:12 +020036#include "util/parse-regs-options.h"
Wang Nan71dc23262015-10-14 12:41:19 +000037#include "util/llvm-utils.h"
Wang Nan8690a2a2016-02-22 09:10:32 +000038#include "util/bpf-loader.h"
Wang Nan5f9cf592016-04-20 18:59:49 +000039#include "util/trigger.h"
Wang Nana0748652016-11-26 07:03:28 +000040#include "util/perf-hooks.h"
Alexey Budankovf13de662019-01-22 20:50:57 +030041#include "util/cpu-set-sched.h"
Arnaldo Carvalho de Meloc5e40272017-04-19 16:12:39 -030042#include "util/time-utils.h"
Arnaldo Carvalho de Melo58db1d62017-04-19 16:05:56 -030043#include "util/units.h"
Song Liu7b612e22019-01-17 08:15:19 -080044#include "util/bpf-event.h"
Wang Nand8871ea2016-02-26 09:32:06 +000045#include "asm/bug.h"
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020046
Arnaldo Carvalho de Meloa43783a2017-04-18 10:46:11 -030047#include <errno.h>
Arnaldo Carvalho de Melofd20e812017-04-17 15:23:08 -030048#include <inttypes.h>
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -030049#include <locale.h>
Arnaldo Carvalho de Melo42087352017-04-19 19:06:30 -030050#include <poll.h>
Peter Zijlstra97124d5e2009-06-02 15:52:24 +020051#include <unistd.h>
Peter Zijlstrade9ac072009-04-08 15:01:31 +020052#include <sched.h>
Arnaldo Carvalho de Melo9607ad32017-04-19 15:49:18 -030053#include <signal.h>
Arnaldo Carvalho de Meloa41794c2010-05-18 18:29:23 -030054#include <sys/mman.h>
Arnaldo Carvalho de Melo42087352017-04-19 19:06:30 -030055#include <sys/wait.h>
Arnaldo Carvalho de Melo0693e682016-08-08 15:05:46 -030056#include <linux/time64.h>
Bernhard Rosenkraenzer78da39f2012-10-08 09:43:26 +030057
/*
 * State for the --switch-output option: controls when the output file is
 * rotated during a recording session and tracks the rotation file ring.
 */
struct switch_output {
	bool		 enabled;	/* any rotation mode is active */
	bool		 signal;	/* rotate when the switch signal arrives */
	unsigned long	 size;		/* rotate after this many bytes written (0 = off) */
	unsigned long	 time;		/* rotation time interval (0 = off) */
	const char	*str;		/* raw option argument as given on the command line */
	bool		 set;		/* option was explicitly supplied */
	char		**filenames;	/* ring of previously generated output file names */
	int		 num_files;	/* capacity of the filenames ring */
	int		 cur_file;	/* index of the slot currently in use */
};
69
/*
 * Top-level state of one 'perf record' invocation: the tool callbacks,
 * parsed options, output data file, event list and session, plus
 * bookkeeping for build-ids, output switching and mmap thread affinity.
 */
struct record {
	struct perf_tool	tool;		/* callback table handed to the session code */
	struct record_opts	opts;		/* parsed command-line options */
	u64			bytes_written;	/* total payload written to the output file */
	struct perf_data	data;		/* perf.data output abstraction */
	struct auxtrace_record	*itr;		/* AUX area (e.g. PT/CS-ETM) recording state, or NULL */
	struct perf_evlist	*evlist;	/* events being recorded */
	struct perf_session	*session;
	int			realtime_prio;	/* SCHED_FIFO priority requested via -r, if any */
	bool			no_buildid;	/* skip build-id processing at exit */
	bool			no_buildid_set;	/* no_buildid came from the command line */
	bool			no_buildid_cache;	/* don't add DSOs to ~/.debug cache */
	bool			no_buildid_cache_set;	/* cache flag came from the command line */
	bool			buildid_all;	/* mark all DSOs regardless of sample hits */
	bool			timestamp_filename;	/* append timestamp to output file name */
	bool			timestamp_boundary;	/* record first/last sample times */
	struct switch_output	switch_output;	/* --switch-output rotation state */
	unsigned long long	samples;	/* samples seen while recording (UI feedback) */
	cpu_set_t		affinity_mask;	/* current CPU mask used for mmap reading */
};
Ingo Molnara21ca2c2009-06-06 09:58:57 +020090
/* Set from the signal path when an AUX area snapshot has been requested. */
static volatile int auxtrace_record__snapshot_started;
/* Trigger fired to capture an AUX area tracing snapshot. */
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
/* Trigger fired when the output file should be rotated (--switch-output). */
static DEFINE_TRIGGER(switch_output_trigger);

/* Human-readable names for the PERF_AFFINITY_* modes, indexed by enum value. */
static const char *affinity_tags[PERF_AFFINITY_MAX] = {
	"SYS", "NODE", "CPU"
};
98
Jiri Olsadc0c6122017-01-09 10:51:58 +010099static bool switch_output_signal(struct record *rec)
100{
101 return rec->switch_output.signal &&
102 trigger_is_ready(&switch_output_trigger);
103}
104
105static bool switch_output_size(struct record *rec)
106{
107 return rec->switch_output.size &&
108 trigger_is_ready(&switch_output_trigger) &&
109 (rec->bytes_written >= rec->switch_output.size);
110}
111
Jiri Olsabfacbe32017-01-09 10:52:00 +0100112static bool switch_output_time(struct record *rec)
113{
114 return rec->switch_output.time &&
115 trigger_is_ready(&switch_output_trigger);
116}
117
/*
 * Write @size bytes from @bf to the session's output file.
 *
 * On success the written-byte counter is bumped and, if the size-based
 * rotation threshold was crossed, the switch-output trigger is hit so the
 * main loop rotates the file.  Returns 0 on success, -1 on write failure.
 */
static int record__write(struct record *rec, struct perf_mmap *map __maybe_unused,
			 void *bf, size_t size)
{
	struct perf_data_file *file = &rec->session->data->file;

	if (perf_data_file__write(file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	rec->bytes_written += size;

	/* Crossing the --switch-output=size threshold requests a rotation. */
	if (switch_output_size(rec))
		trigger_hit(&switch_output_trigger);

	return 0;
}
135
/* Defined further below; declared early so record__pushfn() can use it. */
static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
			    void *src, size_t src_size);
138
#ifdef HAVE_AIO_SUPPORT
/*
 * Queue one asynchronous write of @size bytes at offset @off of @trace_fd.
 *
 * Retries as long as aio_write() fails with EAGAIN (queue full); on any
 * other error the control block is invalidated (aio_fildes = -1) so the
 * sync code treats it as free.  Returns aio_write()'s final result.
 */
static int record__aio_write(struct aiocb *cblock, int trace_fd,
			     void *buf, size_t size, off_t off)
{
	int rc;

	cblock->aio_fildes = trace_fd;
	cblock->aio_buf = buf;
	cblock->aio_nbytes = size;
	cblock->aio_offset = off;
	/* No completion notification; completion is polled via aio_error(). */
	cblock->aio_sigevent.sigev_notify = SIGEV_NONE;

	do {
		rc = aio_write(cblock);
		if (rc == 0) {
			break;
		} else if (errno != EAGAIN) {
			cblock->aio_fildes = -1;
			pr_err("failed to queue perf data, error: %m\n");
			break;
		}
	} while (1);

	return rc;
}
164
/*
 * Poll one in-flight aio write for completion.
 *
 * Returns 0 while the request is still in progress (or was requeued for a
 * short write) and 1 once it is fully done, at which point the mmap
 * reference taken when the write was queued is dropped.
 */
static int record__aio_complete(struct perf_mmap *md, struct aiocb *cblock)
{
	void *rem_buf;
	off_t rem_off;
	size_t rem_size;
	int rc, aio_errno;
	ssize_t aio_ret, written;

	aio_errno = aio_error(cblock);
	if (aio_errno == EINPROGRESS)
		return 0;

	written = aio_ret = aio_return(cblock);
	if (aio_ret < 0) {
		if (aio_errno != EINTR)
			pr_err("failed to write perf data, error: %m\n");
		/* Treat a failed write as zero progress and retry the lot. */
		written = 0;
	}

	rem_size = cblock->aio_nbytes - written;

	if (rem_size == 0) {
		/* Mark the control block free for reuse. */
		cblock->aio_fildes = -1;
		/*
		 * md->refcount is incremented in perf_mmap__push() for
		 * every enqueued aio write request so decrement it because
		 * the request is now complete.
		 */
		perf_mmap__put(md);
		rc = 1;
	} else {
		/*
		 * aio write request may require restart with the
		 * reminder if the kernel didn't write whole
		 * chunk at once.
		 */
		rem_off = cblock->aio_offset + written;
		rem_buf = (void *)(cblock->aio_buf + written);
		record__aio_write(cblock, cblock->aio_fildes,
				rem_buf, rem_size, rem_off);
		rc = 0;
	}

	return rc;
}
210
/*
 * Wait for aio writes on @md's control blocks.
 *
 * With @sync_all false: return the index of the first free control block,
 * suspending until one completes.  With @sync_all true: wait until every
 * request has completed and return -1.  Loops on aio_suspend() timeouts.
 */
static int record__aio_sync(struct perf_mmap *md, bool sync_all)
{
	struct aiocb **aiocb = md->aio.aiocb;
	struct aiocb *cblocks = md->aio.cblocks;
	struct timespec timeout = { 0, 1000 * 1000 * 1 }; /* 1ms */
	int i, do_suspend;

	do {
		do_suspend = 0;
		for (i = 0; i < md->aio.nr_cblocks; ++i) {
			/* aio_fildes == -1 marks a block that is free (or failed). */
			if (cblocks[i].aio_fildes == -1 || record__aio_complete(md, &cblocks[i])) {
				if (sync_all)
					aiocb[i] = NULL;
				else
					return i;
			} else {
				/*
				 * Started aio write is not complete yet
				 * so it has to be waited before the
				 * next allocation.
				 */
				aiocb[i] = &cblocks[i];
				do_suspend = 1;
			}
		}
		if (!do_suspend)
			return -1;

		/* Block (1ms granularity) until some outstanding write finishes. */
		while (aio_suspend((const struct aiocb **)aiocb, md->aio.nr_cblocks, &timeout)) {
			if (!(errno == EAGAIN || errno == EINTR))
				pr_err("failed to sync perf data, error: %m\n");
		}
	} while (1);
}
245
/*
 * perf_mmap__aio_push() callback: queue @size bytes at file offset @off.
 *
 * Mirrors record__write()'s bookkeeping (bytes_written, switch-output
 * trigger) for the async path.  Returns record__aio_write()'s result.
 */
static int record__aio_pushfn(void *to, struct aiocb *cblock, void *bf, size_t size, off_t off)
{
	struct record *rec = to;
	int ret, trace_fd = rec->session->data->file.fd;

	rec->samples++;

	ret = record__aio_write(cblock, trace_fd, bf, size, off);
	if (!ret) {
		/* Count bytes as written once the request is queued. */
		rec->bytes_written += size;
		if (switch_output_size(rec))
			trigger_hit(&switch_output_trigger);
	}

	return ret;
}
262
263static off_t record__aio_get_pos(int trace_fd)
264{
265 return lseek(trace_fd, 0, SEEK_CUR);
266}
267
268static void record__aio_set_pos(int trace_fd, off_t pos)
269{
270 lseek(trace_fd, pos, SEEK_SET);
271}
272
273static void record__aio_mmap_read_sync(struct record *rec)
274{
275 int i;
276 struct perf_evlist *evlist = rec->evlist;
277 struct perf_mmap *maps = evlist->mmap;
278
279 if (!rec->opts.nr_cblocks)
280 return;
281
282 for (i = 0; i < evlist->nr_mmaps; i++) {
283 struct perf_mmap *map = &maps[i];
284
285 if (map->base)
Alexey Budankov93f20c02018-11-06 12:07:19 +0300286 record__aio_sync(map, true);
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300287 }
288}
289
/* Default and command-line cap for the number of aio control blocks. */
static int nr_cblocks_default = 1;
static int nr_cblocks_max = 4;

/*
 * Option callback for --aio[=n]: parse the requested number of control
 * blocks, falling back to the default when none (or zero) is given.
 * Always returns 0; the value is range-clamped later against nr_cblocks_max.
 */
static int record__aio_parse(const struct option *opt,
			     const char *str,
			     int unset)
{
	struct record_opts *opts = (struct record_opts *)opt->value;

	if (unset) {
		opts->nr_cblocks = 0;
	} else {
		if (str)
			opts->nr_cblocks = strtol(str, NULL, 0);
		if (!opts->nr_cblocks)
			opts->nr_cblocks = nr_cblocks_default;
	}

	return 0;
}
#else /* HAVE_AIO_SUPPORT */
/* aio disabled at build time: stubs that report failure / do nothing. */
static int nr_cblocks_max = 0;

static int record__aio_sync(struct perf_mmap *md __maybe_unused, bool sync_all __maybe_unused)
{
	return -1;
}

static int record__aio_pushfn(void *to __maybe_unused, struct aiocb *cblock __maybe_unused,
			      void *bf __maybe_unused, size_t size __maybe_unused, off_t off __maybe_unused)
{
	return -1;
}

static off_t record__aio_get_pos(int trace_fd __maybe_unused)
{
	return -1;
}

static void record__aio_set_pos(int trace_fd __maybe_unused, off_t pos __maybe_unused)
{
}

static void record__aio_mmap_read_sync(struct record *rec __maybe_unused)
{
}
#endif
337
338static int record__aio_enabled(struct record *rec)
339{
340 return rec->opts.nr_cblocks > 0;
341}
342
#define MMAP_FLUSH_DEFAULT 1
/*
 * Option callback for --mmap-flush: parse a byte threshold (with optional
 * B/K/M/G suffix, else a plain number) below which ring-buffer data is not
 * flushed.  The value is clamped to a quarter of the mmap buffer size so a
 * flush can always make progress.  Always returns 0.
 */
static int record__mmap_flush_parse(const struct option *opt,
				    const char *str,
				    int unset)
{
	int flush_max;
	struct record_opts *opts = (struct record_opts *)opt->value;
	static struct parse_tag tags[] = {
			{ .tag = 'B', .mult = 1 },
			{ .tag = 'K', .mult = 1 << 10 },
			{ .tag = 'M', .mult = 1 << 20 },
			{ .tag = 'G', .mult = 1 << 30 },
			{ .tag = 0 },
	};

	if (unset)
		return 0;

	if (str) {
		opts->mmap_flush = parse_tag_value(str, tags);
		/* parse_tag_value() returns (unsigned long)-1 when no tag matched. */
		if (opts->mmap_flush == (int)-1)
			opts->mmap_flush = strtol(str, NULL, 0);
	}

	if (!opts->mmap_flush)
		opts->mmap_flush = MMAP_FLUSH_DEFAULT;

	/* Never require more than a quarter of the buffer before flushing. */
	flush_max = perf_evlist__mmap_size(opts->mmap_pages);
	flush_max /= 4;
	if (opts->mmap_flush > flush_max)
		opts->mmap_flush = flush_max;

	return 0;
}
377
Alexey Budankov51255a82019-03-18 20:42:19 +0300378static unsigned int comp_level_max = 22;
379
Alexey Budankov42e1fd82019-03-18 20:41:33 +0300380static int record__comp_enabled(struct record *rec)
381{
382 return rec->opts.comp_level > 0;
383}
384
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -0200385static int process_synthesized_event(struct perf_tool *tool,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -0200386 union perf_event *event,
Irina Tirdea1d037ca2012-09-11 01:15:03 +0300387 struct perf_sample *sample __maybe_unused,
388 struct machine *machine __maybe_unused)
Arnaldo Carvalho de Melo234fbbf2009-10-26 19:23:18 -0200389{
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -0300390 struct record *rec = container_of(tool, struct record, tool);
Jiri Olsaded2b8f2018-09-13 14:54:06 +0200391 return record__write(rec, NULL, event, event->header.size);
Arnaldo Carvalho de Melo234fbbf2009-10-26 19:23:18 -0200392}
393
/*
 * perf_mmap__push() callback for the synchronous path: optionally compress
 * the chunk into the map's scratch buffer, then write it to the output file.
 */
static int record__pushfn(struct perf_mmap *map, void *to, void *bf, size_t size)
{
	struct record *rec = to;

	if (record__comp_enabled(rec)) {
		/* Compress in place into map->data and write that instead. */
		size = zstd_compress(rec->session, map->data, perf_mmap__mmap_len(map), bf, size);
		bf = map->data;
	}

	rec->samples++;
	return record__write(rec, map, bf, size);
}
406
/* Main-loop exit flag, the terminating signal number, and child status. */
static volatile int done;
static volatile int signr = -1;
static volatile int child_finished;

/*
 * Common signal handler: note whether the workload child exited or which
 * signal should be re-raised at exit, then ask the main loop to stop.
 */
static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
}
420
/*
 * SIGSEGV handler: undo any perf-hook state before dumping the stack so a
 * crashing hook does not leave the record session wedged.
 */
static void sigsegv_handler(int sig)
{
	perf_hooks__recover();
	sighandler_dump_stack(sig);
}
426
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300427static void record__sig_exit(void)
428{
429 if (signr == -1)
430 return;
431
432 signal(signr, SIG_DFL);
433 raise(signr);
434}
435
#ifdef HAVE_AUXTRACE_SUPPORT

/*
 * Write one AUX area tracing event plus its (possibly wrapped) payload.
 *
 * For seekable single-file output the event's file offset is recorded in
 * the auxtrace index first.  The payload may arrive in two pieces when it
 * wraps the ring buffer (data1/data2); it is padded to an 8-byte boundary,
 * matching the size already accounted in event.auxtrace.size.
 */
static int record__process_auxtrace(struct perf_tool *tool,
				    struct perf_mmap *map,
				    union perf_event *event, void *data1,
				    size_t len1, void *data2, size_t len2)
{
	struct record *rec = container_of(tool, struct record, tool);
	struct perf_data *data = &rec->data;
	size_t padding;
	u8 pad[8] = {0};

	if (!perf_data__is_pipe(data) && !perf_data__is_dir(data)) {
		off_t file_offset;
		int fd = perf_data__fd(data);
		int err;

		file_offset = lseek(fd, 0, SEEK_CUR);
		if (file_offset == -1)
			return -1;
		err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
						     event, file_offset);
		if (err)
			return err;
	}

	/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
	padding = (len1 + len2) & 7;
	if (padding)
		padding = 8 - padding;

	record__write(rec, map, event, event->header.size);
	record__write(rec, map, data1, len1);
	if (len2)
		record__write(rec, map, data2, len2);
	record__write(rec, map, &pad, padding);

	return 0;
}
475
476static int record__auxtrace_mmap_read(struct record *rec,
Jiri Olsae035f4c2018-09-13 14:54:05 +0200477 struct perf_mmap *map)
Adrian Hunteref149c22015-04-09 18:53:45 +0300478{
479 int ret;
480
Jiri Olsae035f4c2018-09-13 14:54:05 +0200481 ret = auxtrace_mmap__read(map, rec->itr, &rec->tool,
Adrian Hunteref149c22015-04-09 18:53:45 +0300482 record__process_auxtrace);
483 if (ret < 0)
484 return ret;
485
486 if (ret)
487 rec->samples++;
488
489 return 0;
490}
491
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300492static int record__auxtrace_mmap_read_snapshot(struct record *rec,
Jiri Olsae035f4c2018-09-13 14:54:05 +0200493 struct perf_mmap *map)
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300494{
495 int ret;
496
Jiri Olsae035f4c2018-09-13 14:54:05 +0200497 ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool,
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300498 record__process_auxtrace,
499 rec->opts.auxtrace_snapshot_size);
500 if (ret < 0)
501 return ret;
502
503 if (ret)
504 rec->samples++;
505
506 return 0;
507}
508
509static int record__auxtrace_read_snapshot_all(struct record *rec)
510{
511 int i;
512 int rc = 0;
513
514 for (i = 0; i < rec->evlist->nr_mmaps; i++) {
Jiri Olsae035f4c2018-09-13 14:54:05 +0200515 struct perf_mmap *map = &rec->evlist->mmap[i];
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300516
Jiri Olsae035f4c2018-09-13 14:54:05 +0200517 if (!map->auxtrace_mmap.base)
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300518 continue;
519
Jiri Olsae035f4c2018-09-13 14:54:05 +0200520 if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300521 rc = -1;
522 goto out;
523 }
524 }
525out:
526 return rc;
527}
528
/*
 * Service a pending snapshot request: read all AUX rings, then either
 * re-arm the snapshot trigger or mark it errored so no more are taken.
 */
static void record__read_auxtrace_snapshot(struct record *rec)
{
	pr_debug("Recording AUX area tracing snapshot\n");
	if (record__auxtrace_read_snapshot_all(rec) < 0) {
		trigger_error(&auxtrace_snapshot_trigger);
	} else {
		/* Let the PMU resume tracing before accepting another request. */
		if (auxtrace_record__snapshot_finish(rec->itr))
			trigger_error(&auxtrace_snapshot_trigger);
		else
			trigger_ready(&auxtrace_snapshot_trigger);
	}
}
541
/*
 * Set up AUX area tracing for this session: create the itr state if not
 * already present, then parse snapshot options and AUX filters.
 * Returns 0 or a negative error from any of the steps.
 */
static int record__auxtrace_init(struct record *rec)
{
	int err;

	if (!rec->itr) {
		rec->itr = auxtrace_record__init(rec->evlist, &err);
		if (err)
			return err;
	}

	err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
					      rec->opts.auxtrace_snapshot_opts);
	if (err)
		return err;

	return auxtrace_parse_filters(rec->evlist);
}
559
#else

/* AUX area tracing disabled at build time: no-op stubs. */

static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
			       struct perf_mmap *map __maybe_unused)
{
	return 0;
}

static inline
void record__read_auxtrace_snapshot(struct record *rec __maybe_unused)
{
}

static inline
int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
{
	return 0;
}

static int record__auxtrace_init(struct record *rec __maybe_unused)
{
	return 0;
}

#endif
586
/*
 * mmap the ring buffers for @evlist with all recording options applied
 * (AUX pages, snapshot mode, aio blocks, affinity, flush and compression).
 * Gives a targeted hint for the common EPERM (mlock limit) failure.
 * Returns 0 on success or a negative errno-style value.
 */
static int record__mmap_evlist(struct record *rec,
			       struct perf_evlist *evlist)
{
	struct record_opts *opts = &rec->opts;
	char msg[512];

	/* NODE/CPU affinity modes need the cpu -> node map ready up front. */
	if (opts->affinity != PERF_AFFINITY_SYS)
		cpu__setup_cpunode_map();

	if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
				 opts->auxtrace_mmap_pages,
				 opts->auxtrace_snapshot_mode,
				 opts->nr_cblocks, opts->affinity,
				 opts->mmap_flush, opts->comp_level) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u,%u)\n",
			       opts->mmap_pages, opts->auxtrace_mmap_pages);
			return -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno,
				str_error_r(errno, msg, sizeof(msg)));
			if (errno)
				return -errno;
			else
				return -EINVAL;
		}
	}
	return 0;
}
620
621static int record__mmap(struct record *rec)
622{
623 return record__mmap_evlist(rec, rec->evlist);
624}
625
/*
 * Open all events, apply filters and mmap the ring buffers.
 *
 * Each evsel is opened with fallback handling: perf_evsel__fallback() may
 * rewrite the event config and retry, and a failing member of a weak group
 * causes the group to be broken up and retried.  On success the session's
 * evlist and id header size are set.  Returns 0 or a negative error.
 */
static int record__open(struct record *rec)
{
	char msg[BUFSIZ];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	int rc = 0;

	/*
	 * For initial_delay we need to add a dummy event so that we can track
	 * PERF_RECORD_MMAP while we wait for the initial delay to enable the
	 * real events, the ones asked by the user.
	 */
	if (opts->initial_delay) {
		if (perf_evlist__add_dummy(evlist))
			return -ENOMEM;

		/* Dummy tracks from the start; real events enable on exec. */
		pos = perf_evlist__first(evlist);
		pos->tracking = 0;
		pos = perf_evlist__last(evlist);
		pos->tracking = 1;
		pos->attr.enable_on_exec = 1;
	}

	perf_evlist__config(evlist, opts, &callchain_param);

	evlist__for_each_entry(evlist, pos) {
try_again:
		if (perf_evsel__open(pos, pos->cpus, pos->threads) < 0) {
			/* First try a downgraded event config (e.g. no kernel samples). */
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose > 0)
					ui__warning("%s\n", msg);
				goto try_again;
			}
			/* Then try dissolving a weak group and reopening its members. */
			if ((errno == EINVAL || errno == EBADF) &&
			    pos->leader != pos &&
			    pos->weak_group) {
				pos = perf_evlist__reset_weak_group(evlist, pos);
				goto try_again;
			}
			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}

		pos->supported = true;
	}

	if (perf_evlist__apply_filters(evlist, &pos)) {
		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
			pos->filter, perf_evsel__name(pos), errno,
			str_error_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	rc = record__mmap(rec);
	if (rc)
		goto out;

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}
694
Namhyung Kime3d59112015-01-29 17:06:44 +0900695static int process_sample_event(struct perf_tool *tool,
696 union perf_event *event,
697 struct perf_sample *sample,
698 struct perf_evsel *evsel,
699 struct machine *machine)
700{
701 struct record *rec = container_of(tool, struct record, tool);
702
Jin Yao68588ba2017-12-08 21:13:42 +0800703 if (rec->evlist->first_sample_time == 0)
704 rec->evlist->first_sample_time = sample->time;
Namhyung Kime3d59112015-01-29 17:06:44 +0900705
Jin Yao68588ba2017-12-08 21:13:42 +0800706 rec->evlist->last_sample_time = sample->time;
707
708 if (rec->buildid_all)
709 return 0;
710
711 rec->samples++;
Namhyung Kime3d59112015-01-29 17:06:44 +0900712 return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
713}
714
/*
 * After recording, replay the data file to collect build-ids for the DSOs
 * that were hit (or all of them with --buildid-all).  Returns 0 if the
 * file is empty, otherwise perf_session__process_events()'s result.
 */
static int process_buildids(struct record *rec)
{
	struct perf_session *session = rec->session;

	if (perf_data__size(&rec->data) == 0)
		return 0;

	/*
	 * During this process, it'll load kernel map and replace the
	 * dso->long_name to a real pathname it found.  In this case
	 * we prefer the vmlinux path like
	 *   /lib/modules/3.16.4/build/vmlinux
	 *
	 * rather than build-id path (in debug directory).
	 *   $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
	 */
	symbol_conf.ignore_vmlinux_buildid = true;

	/*
	 * If --buildid-all is given, it marks all DSO regardless of hits,
	 * so no need to process samples. But if timestamp_boundary is enabled,
	 * it still needs to walk on all samples to get the timestamps of
	 * first/last samples.
	 */
	if (rec->buildid_all && !rec->timestamp_boundary)
		rec->tool.sample = NULL;

	return perf_session__process_events(session);
}
744
/*
 * machines__process_guests() callback: synthesize module and kernel mmap
 * events for one guest machine so its samples can be resolved later.
 */
static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * As for guest kernel when processing subcommand record&report,
	 * we arrange module mmap prior to guest kernel mmap and trigger
	 * a preload dso because default guest module symbols are loaded
	 * from guest kallsyms instead of /lib/modules/XXX/XXX.  This
	 * method is used to avoid symbol missing when the first addr is
	 * in module instead of in guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
	 * have no _text sometimes.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}
773
/*
 * Marker event written after each full pass over the mmaps; tells the
 * report side that all events queued so far can be flushed and ordered.
 */
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};
778
Alexey Budankovf13de662019-01-22 20:50:57 +0300779static void record__adjust_affinity(struct record *rec, struct perf_mmap *map)
780{
781 if (rec->opts.affinity != PERF_AFFINITY_SYS &&
782 !CPU_EQUAL(&rec->affinity_mask, &map->affinity_mask)) {
783 CPU_ZERO(&rec->affinity_mask);
784 CPU_OR(&rec->affinity_mask, &rec->affinity_mask, &map->affinity_mask);
785 sched_setaffinity(0, sizeof(rec->affinity_mask), &rec->affinity_mask);
786 }
787}
788
Alexey Budankov5d7f4112019-03-18 20:43:35 +0300789static size_t process_comp_header(void *record, size_t increment)
790{
791 struct compressed_event *event = record;
792 size_t size = sizeof(*event);
793
794 if (increment) {
795 event->header.size += increment;
796 return increment;
797 }
798
799 event->header.type = PERF_RECORD_COMPRESSED;
800 event->header.size = size;
801
802 return size;
803}
804
805static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
806 void *src, size_t src_size)
807{
808 size_t compressed;
809 size_t max_record_size = PERF_SAMPLE_MAX_SIZE - sizeof(struct compressed_event) - 1;
810
811 compressed = zstd_compress_stream_to_records(&session->zstd_data, dst, dst_size, src, src_size,
812 max_record_size, process_comp_header);
813
814 session->bytes_transferred += src_size;
815 session->bytes_compressed += compressed;
816
817 return compressed;
818}
819
/*
 * Drain every mmap'ed ring buffer of @evlist into the output file.
 *
 * @overwrite selects the overwritable (backward) set of maps instead of
 * the regular ones.  @synch forces a full drain by temporarily lowering
 * each map's flush threshold to 1 and restoring it afterwards.  Writes a
 * PERF_RECORD_FINISHED_ROUND marker when at least one byte was written.
 * Returns 0 on success, -1 on error.
 */
static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
				    bool overwrite, bool synch)
{
	u64 bytes_written = rec->bytes_written;
	int i;
	int rc = 0;
	struct perf_mmap *maps;
	int trace_fd = rec->data.file.fd;
	off_t off;

	if (!evlist)
		return 0;

	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
	if (!maps)
		return 0;

	/* Backward maps are only read while data collection is pending. */
	if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
		return 0;

	/* In AIO mode the file position is tracked manually across writes. */
	if (record__aio_enabled(rec))
		off = record__aio_get_pos(trace_fd);

	for (i = 0; i < evlist->nr_mmaps; i++) {
		u64 flush = 0;
		struct perf_mmap *map = &maps[i];

		if (map->base) {
			record__adjust_affinity(rec, map);
			if (synch) {
				/* Save the threshold and flush everything now. */
				flush = map->flush;
				map->flush = 1;
			}
			if (!record__aio_enabled(rec)) {
				if (perf_mmap__push(map, rec, record__pushfn) != 0) {
					if (synch)
						map->flush = flush;
					rc = -1;
					goto out;
				}
			} else {
				int idx;
				/*
				 * Call record__aio_sync() to wait till map->data buffer
				 * becomes available after previous aio write request.
				 */
				idx = record__aio_sync(map, false);
				if (perf_mmap__aio_push(map, rec, idx, record__aio_pushfn, &off) != 0) {
					record__aio_set_pos(trace_fd, off);
					if (synch)
						map->flush = flush;
					rc = -1;
					goto out;
				}
			}
			if (synch)
				map->flush = flush;	/* restore the saved threshold */
		}

		if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
		    record__auxtrace_mmap_read(rec, map) != 0) {
			rc = -1;
			goto out;
		}
	}

	if (record__aio_enabled(rec))
		record__aio_set_pos(trace_fd, off);

	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, NULL, &finished_round_event, sizeof(finished_round_event));

	if (overwrite)
		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
out:
	return rc;
}
901
Alexey Budankov470530b2019-03-18 20:40:26 +0300902static int record__mmap_read_all(struct record *rec, bool synch)
Wang Nancb216862016-06-27 10:24:04 +0000903{
904 int err;
905
Alexey Budankov470530b2019-03-18 20:40:26 +0300906 err = record__mmap_read_evlist(rec, rec->evlist, false, synch);
Wang Nancb216862016-06-27 10:24:04 +0000907 if (err)
908 return err;
909
Alexey Budankov470530b2019-03-18 20:40:26 +0300910 return record__mmap_read_evlist(rec, rec->evlist, true, synch);
Wang Nancb216862016-06-27 10:24:04 +0000911}
912
/*
 * Initialize the perf.data header feature bits for this session:
 * start with every feature set, then clear the ones that don't apply
 * to the current set of options/events.
 */
static void record__init_features(struct record *rec)
{
	struct perf_session *session = rec->session;
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&rec->evlist->entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);

	if (!rec->opts.full_auxtrace)
		perf_header__clear_feat(&session->header, HEADER_AUXTRACE);

	/* Only record the clockid when both -k and its resolution are known. */
	if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
		perf_header__clear_feat(&session->header, HEADER_CLOCKID);

	perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
	if (!record__comp_enabled(rec))
		perf_header__clear_feat(&session->header, HEADER_COMPRESSED);

	/* HEADER_STAT is for 'perf stat record', never for 'perf record'. */
	perf_header__clear_feat(&session->header, HEADER_STAT);
}
942
Wang Nane1ab48b2016-02-26 09:32:10 +0000943static void
944record__finish_output(struct record *rec)
945{
Jiri Olsa8ceb41d2017-01-23 22:07:59 +0100946 struct perf_data *data = &rec->data;
947 int fd = perf_data__fd(data);
Wang Nane1ab48b2016-02-26 09:32:10 +0000948
Jiri Olsa8ceb41d2017-01-23 22:07:59 +0100949 if (data->is_pipe)
Wang Nane1ab48b2016-02-26 09:32:10 +0000950 return;
951
952 rec->session->header.data_size += rec->bytes_written;
Jiri Olsa45112e82019-02-21 10:41:29 +0100953 data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR);
Wang Nane1ab48b2016-02-26 09:32:10 +0000954
955 if (!rec->no_buildid) {
956 process_buildids(rec);
957
958 if (rec->buildid_all)
959 dsos__hit_all(rec->session);
960 }
961 perf_session__write_header(rec->session, rec->evlist, fd, true);
962
963 return;
964}
965
Wang Nan4ea648a2016-07-14 08:34:47 +0000966static int record__synthesize_workload(struct record *rec, bool tail)
Wang Nanbe7b0c92016-04-20 18:59:54 +0000967{
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -0300968 int err;
969 struct thread_map *thread_map;
Wang Nanbe7b0c92016-04-20 18:59:54 +0000970
Wang Nan4ea648a2016-07-14 08:34:47 +0000971 if (rec->opts.tail_synthesize != tail)
972 return 0;
973
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -0300974 thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
975 if (thread_map == NULL)
976 return -1;
977
978 err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
Wang Nanbe7b0c92016-04-20 18:59:54 +0000979 process_synthesized_event,
980 &rec->session->machines.host,
Mark Drayton3fcb10e2018-12-04 12:34:20 -0800981 rec->opts.sample_address);
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -0300982 thread_map__put(thread_map);
983 return err;
Wang Nanbe7b0c92016-04-20 18:59:54 +0000984}
985
Wang Nan4ea648a2016-07-14 08:34:47 +0000986static int record__synthesize(struct record *rec, bool tail);
Wang Nan3c1cb7e2016-04-20 18:59:50 +0000987
/*
 * Rotate the output file ('perf record --switch-output'): finish the
 * current perf.data, open a timestamped successor, and synthesize the
 * tail/tracking events needed to make each file self-contained.  With
 * --switch-max-files the oldest kept file is removed.  Returns the new
 * output fd (negative on failure to switch).
 */
static int
record__switch_output(struct record *rec, bool at_exit)
{
	struct perf_data *data = &rec->data;
	int fd, err;
	char *new_filename;

	/* Same Size: "2015122520103046"*/
	char timestamp[] = "InvalidTimestamp";

	/* Wait for in-flight AIO writes before touching the file. */
	record__aio_mmap_read_sync(rec);

	record__synthesize(rec, true);
	if (target__none(&rec->opts.target))
		record__synthesize_workload(rec, true);

	rec->samples = 0;
	record__finish_output(rec);
	err = fetch_current_timestamp(timestamp, sizeof(timestamp));
	if (err) {
		pr_err("Failed to get current timestamp\n");
		return -EINVAL;
	}

	fd = perf_data__switch(data, timestamp,
			       rec->session->header.data_offset,
			       at_exit, &new_filename);
	if (fd >= 0 && !at_exit) {
		/* Fresh output file: restart the byte accounting. */
		rec->bytes_written = 0;
		rec->session->header.data_size = 0;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
			data->path, timestamp);

	if (rec->switch_output.num_files) {
		/* --switch-max-files ring: replace the oldest kept file. */
		int n = rec->switch_output.cur_file + 1;

		if (n >= rec->switch_output.num_files)
			n = 0;
		rec->switch_output.cur_file = n;
		if (rec->switch_output.filenames[n]) {
			remove(rec->switch_output.filenames[n]);
			free(rec->switch_output.filenames[n]);
		}
		rec->switch_output.filenames[n] = new_filename;
	} else {
		free(new_filename);
	}

	/* Output tracking events */
	if (!at_exit) {
		record__synthesize(rec, false);

		/*
		 * In 'perf record --switch-output' without -a,
		 * record__synthesize() in record__switch_output() won't
		 * generate tracking events because there's no thread_map
		 * in evlist. Which causes newly created perf.data doesn't
		 * contain map and comm information.
		 * Create a fake thread_map and directly call
		 * perf_event__synthesize_thread_map() for those events.
		 */
		if (target__none(&rec->opts.target))
			record__synthesize_workload(rec, false);
	}
	return fd;
}
1057
/* errno sent by the workload (via SIGUSR1 siginfo) when its exec fails */
static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	/* Stash the child's errno and make the main record loop terminate. */
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}
1073
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001074static void snapshot_sig_handler(int sig);
Jiri Olsabfacbe32017-01-09 10:52:00 +01001075static void alarm_sig_handler(int sig);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001076
/*
 * Weak no-op default for synthesizing a time-conversion event; arch
 * code may override it (the overriding implementations are not visible
 * in this file).
 */
int __weak
perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
			    struct perf_tool *tool __maybe_unused,
			    perf_event__handler_t process __maybe_unused,
			    struct machine *machine __maybe_unused)
{
	return 0;
}
1085
Wang Nanee667f92016-06-27 10:24:05 +00001086static const struct perf_event_mmap_page *
1087perf_evlist__pick_pc(struct perf_evlist *evlist)
1088{
Wang Nanb2cb6152016-07-14 08:34:39 +00001089 if (evlist) {
1090 if (evlist->mmap && evlist->mmap[0].base)
1091 return evlist->mmap[0].base;
Wang Nan0b72d692017-12-04 16:51:07 +00001092 if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].base)
1093 return evlist->overwrite_mmap[0].base;
Wang Nanb2cb6152016-07-14 08:34:39 +00001094 }
Wang Nanee667f92016-06-27 10:24:05 +00001095 return NULL;
1096}
1097
Wang Nanc45628b2016-05-24 02:28:59 +00001098static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
1099{
Wang Nanee667f92016-06-27 10:24:05 +00001100 const struct perf_event_mmap_page *pc;
1101
1102 pc = perf_evlist__pick_pc(rec->evlist);
1103 if (pc)
1104 return pc;
Wang Nanc45628b2016-05-24 02:28:59 +00001105 return NULL;
1106}
1107
Wang Nan4ea648a2016-07-14 08:34:47 +00001108static int record__synthesize(struct record *rec, bool tail)
Wang Nanc45c86e2016-02-26 09:32:07 +00001109{
1110 struct perf_session *session = rec->session;
1111 struct machine *machine = &session->machines.host;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001112 struct perf_data *data = &rec->data;
Wang Nanc45c86e2016-02-26 09:32:07 +00001113 struct record_opts *opts = &rec->opts;
1114 struct perf_tool *tool = &rec->tool;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001115 int fd = perf_data__fd(data);
Wang Nanc45c86e2016-02-26 09:32:07 +00001116 int err = 0;
1117
Wang Nan4ea648a2016-07-14 08:34:47 +00001118 if (rec->opts.tail_synthesize != tail)
1119 return 0;
1120
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001121 if (data->is_pipe) {
Jiri Olsaa2015512018-03-14 10:22:04 +01001122 /*
1123 * We need to synthesize events first, because some
1124 * features works on top of them (on report side).
1125 */
Jiri Olsa318ec182018-08-30 08:32:15 +02001126 err = perf_event__synthesize_attrs(tool, rec->evlist,
Wang Nanc45c86e2016-02-26 09:32:07 +00001127 process_synthesized_event);
1128 if (err < 0) {
1129 pr_err("Couldn't synthesize attrs.\n");
1130 goto out;
1131 }
1132
Jiri Olsaa2015512018-03-14 10:22:04 +01001133 err = perf_event__synthesize_features(tool, session, rec->evlist,
1134 process_synthesized_event);
1135 if (err < 0) {
1136 pr_err("Couldn't synthesize features.\n");
1137 return err;
1138 }
1139
Wang Nanc45c86e2016-02-26 09:32:07 +00001140 if (have_tracepoints(&rec->evlist->entries)) {
1141 /*
1142 * FIXME err <= 0 here actually means that
1143 * there were no tracepoints so its not really
1144 * an error, just that we don't need to
1145 * synthesize anything. We really have to
1146 * return this more properly and also
1147 * propagate errors that now are calling die()
1148 */
1149 err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
1150 process_synthesized_event);
1151 if (err <= 0) {
1152 pr_err("Couldn't record tracing data.\n");
1153 goto out;
1154 }
1155 rec->bytes_written += err;
1156 }
1157 }
1158
Wang Nanc45628b2016-05-24 02:28:59 +00001159 err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
Adrian Hunter46bc29b2016-03-08 10:38:44 +02001160 process_synthesized_event, machine);
1161 if (err)
1162 goto out;
1163
Wang Nanc45c86e2016-02-26 09:32:07 +00001164 if (rec->opts.full_auxtrace) {
1165 err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
1166 session, process_synthesized_event);
1167 if (err)
1168 goto out;
1169 }
1170
Arnaldo Carvalho de Melo6c443952017-11-14 11:03:19 -03001171 if (!perf_evlist__exclude_kernel(rec->evlist)) {
1172 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
1173 machine);
1174 WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
1175 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
1176 "Check /proc/kallsyms permission or run as root.\n");
Wang Nanc45c86e2016-02-26 09:32:07 +00001177
Arnaldo Carvalho de Melo6c443952017-11-14 11:03:19 -03001178 err = perf_event__synthesize_modules(tool, process_synthesized_event,
1179 machine);
1180 WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
1181 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
1182 "Check /proc/modules permission or run as root.\n");
1183 }
Wang Nanc45c86e2016-02-26 09:32:07 +00001184
1185 if (perf_guest) {
1186 machines__process_guests(&session->machines,
1187 perf_event__synthesize_guest_os, tool);
1188 }
1189
Andi Kleenbfd8f722017-11-17 13:42:58 -08001190 err = perf_event__synthesize_extra_attr(&rec->tool,
1191 rec->evlist,
1192 process_synthesized_event,
1193 data->is_pipe);
1194 if (err)
1195 goto out;
1196
Andi Kleen373565d2017-11-17 13:42:59 -08001197 err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->threads,
1198 process_synthesized_event,
1199 NULL);
1200 if (err < 0) {
1201 pr_err("Couldn't synthesize thread map.\n");
1202 return err;
1203 }
1204
1205 err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->cpus,
1206 process_synthesized_event, NULL);
1207 if (err < 0) {
1208 pr_err("Couldn't synthesize cpu map.\n");
1209 return err;
1210 }
1211
Song Liue5416952019-03-11 22:30:41 -07001212 err = perf_event__synthesize_bpf_events(session, process_synthesized_event,
Song Liu7b612e22019-01-17 08:15:19 -08001213 machine, opts);
1214 if (err < 0)
1215 pr_warning("Couldn't synthesize bpf events.\n");
1216
Wang Nanc45c86e2016-02-26 09:32:07 +00001217 err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
1218 process_synthesized_event, opts->sample_address,
Mark Drayton3fcb10e2018-12-04 12:34:20 -08001219 1);
Wang Nanc45c86e2016-02-26 09:32:07 +00001220out:
1221 return err;
1222}
1223
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001224static int __cmd_record(struct record *rec, int argc, const char **argv)
Peter Zijlstra16c8a102009-05-05 17:50:27 +02001225{
David Ahern57706ab2013-11-06 11:41:34 -07001226 int err;
Namhyung Kim45604712014-05-12 09:47:24 +09001227 int status = 0;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001228 unsigned long waking = 0;
Zhang, Yanmin46be6042010-03-18 11:36:04 -03001229 const bool forks = argc > 0;
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02001230 struct perf_tool *tool = &rec->tool;
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03001231 struct record_opts *opts = &rec->opts;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001232 struct perf_data *data = &rec->data;
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001233 struct perf_session *session;
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001234 bool disabled = false, draining = false;
Song Liu657ee552019-03-11 22:30:50 -07001235 struct perf_evlist *sb_evlist = NULL;
Namhyung Kim42aa2762015-01-29 17:06:48 +09001236 int fd;
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001237 float ratio = 0;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001238
Namhyung Kim45604712014-05-12 09:47:24 +09001239 atexit(record__sig_exit);
Peter Zijlstraf5970552009-06-18 23:22:55 +02001240 signal(SIGCHLD, sig_handler);
1241 signal(SIGINT, sig_handler);
David Ahern804f7ac2013-05-06 12:24:23 -06001242 signal(SIGTERM, sig_handler);
Wang Nana0748652016-11-26 07:03:28 +00001243 signal(SIGSEGV, sigsegv_handler);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00001244
Hari Bathinif3b36142017-03-08 02:11:43 +05301245 if (rec->opts.record_namespaces)
1246 tool->namespace_events = true;
1247
Jiri Olsadc0c6122017-01-09 10:51:58 +01001248 if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001249 signal(SIGUSR2, snapshot_sig_handler);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001250 if (rec->opts.auxtrace_snapshot_mode)
1251 trigger_on(&auxtrace_snapshot_trigger);
Jiri Olsadc0c6122017-01-09 10:51:58 +01001252 if (rec->switch_output.enabled)
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001253 trigger_on(&switch_output_trigger);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00001254 } else {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001255 signal(SIGUSR2, SIG_IGN);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00001256 }
Peter Zijlstraf5970552009-06-18 23:22:55 +02001257
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001258 session = perf_session__new(data, false, tool);
Arnaldo Carvalho de Melo94c744b2009-12-11 21:24:02 -02001259 if (session == NULL) {
Adrien BAKffa91882014-04-18 11:00:43 +09001260 pr_err("Perf session creation failed.\n");
Arnaldo Carvalho de Meloa9a70bb2009-11-17 01:18:11 -02001261 return -1;
1262 }
1263
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001264 fd = perf_data__fd(data);
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001265 rec->session = session;
1266
Alexey Budankov5d7f4112019-03-18 20:43:35 +03001267 if (zstd_init(&session->zstd_data, rec->opts.comp_level) < 0) {
1268 pr_err("Compression initialization failed.\n");
1269 return -1;
1270 }
1271
1272 session->header.env.comp_type = PERF_COMP_ZSTD;
1273 session->header.env.comp_level = rec->opts.comp_level;
1274
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001275 record__init_features(rec);
Stephane Eranian330aa672012-03-08 23:47:46 +01001276
Alexey Budankovcf790512018-10-09 17:36:24 +03001277 if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
1278 session->header.env.clockid_res_ns = rec->opts.clockid_res_ns;
1279
Arnaldo Carvalho de Melod4db3f12009-12-27 21:36:57 -02001280 if (forks) {
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001281 err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001282 argv, data->is_pipe,
Arnaldo Carvalho de Melo735f7e02014-01-03 14:56:49 -03001283 workload_exec_failed_signal);
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001284 if (err < 0) {
1285 pr_err("Couldn't run the workload!\n");
Namhyung Kim45604712014-05-12 09:47:24 +09001286 status = err;
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001287 goto out_delete_session;
Jens Axboe0a5ac842009-08-12 11:18:01 +02001288 }
Peter Zijlstra856e9662009-12-16 17:55:55 +01001289 }
1290
Jiri Olsaad46e48c2018-03-02 17:13:54 +01001291 /*
1292 * If we have just single event and are sending data
1293 * through pipe, we need to force the ids allocation,
1294 * because we synthesize event name through the pipe
1295 * and need the id for that.
1296 */
1297 if (data->is_pipe && rec->evlist->nr_entries == 1)
1298 rec->opts.sample_id = true;
1299
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001300 if (record__open(rec) != 0) {
David Ahern8d3eca22012-08-26 12:24:47 -06001301 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001302 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001303 }
Alexey Budankov42e1fd82019-03-18 20:41:33 +03001304 session->header.env.comp_mmap_len = session->evlist->mmap_len;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001305
Wang Nan8690a2a2016-02-22 09:10:32 +00001306 err = bpf__apply_obj_config();
1307 if (err) {
1308 char errbuf[BUFSIZ];
1309
1310 bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
1311 pr_err("ERROR: Apply config to BPF failed: %s\n",
1312 errbuf);
1313 goto out_child;
1314 }
1315
Adrian Huntercca84822015-08-19 17:29:21 +03001316 /*
1317 * Normally perf_session__new would do this, but it doesn't have the
1318 * evlist.
1319 */
1320 if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
1321 pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
1322 rec->tool.ordered_events = false;
1323 }
1324
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001325 if (!rec->evlist->nr_groups)
Namhyung Kima8bb5592013-01-22 18:09:31 +09001326 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
1327
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001328 if (data->is_pipe) {
Namhyung Kim42aa2762015-01-29 17:06:48 +09001329 err = perf_header__write_pipe(fd);
Tom Zanussi529870e2010-04-01 23:59:16 -05001330 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001331 goto out_child;
Jiri Olsa563aecb2013-06-05 13:35:06 +02001332 } else {
Namhyung Kim42aa2762015-01-29 17:06:48 +09001333 err = perf_session__write_header(session, rec->evlist, fd, false);
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02001334 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001335 goto out_child;
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02001336 }
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02001337
David Ahernd3665492012-02-06 15:27:52 -07001338 if (!rec->no_buildid
Robert Richtere20960c2011-12-07 10:02:55 +01001339 && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
David Ahernd3665492012-02-06 15:27:52 -07001340 pr_err("Couldn't generate buildids. "
Robert Richtere20960c2011-12-07 10:02:55 +01001341 "Use --no-buildid to profile anyway.\n");
David Ahern8d3eca22012-08-26 12:24:47 -06001342 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001343 goto out_child;
Robert Richtere20960c2011-12-07 10:02:55 +01001344 }
1345
Song Liud56354d2019-03-11 22:30:51 -07001346 if (!opts->no_bpf_event)
1347 bpf_event__add_sb_event(&sb_evlist, &session->header.env);
1348
Song Liu657ee552019-03-11 22:30:50 -07001349 if (perf_evlist__start_sb_thread(sb_evlist, &rec->opts.target)) {
1350 pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
1351 opts->no_bpf_event = true;
1352 }
1353
Wang Nan4ea648a2016-07-14 08:34:47 +00001354 err = record__synthesize(rec, false);
Wang Nanc45c86e2016-02-26 09:32:07 +00001355 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001356 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001357
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001358 if (rec->realtime_prio) {
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001359 struct sched_param param;
1360
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001361 param.sched_priority = rec->realtime_prio;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001362 if (sched_setscheduler(0, SCHED_FIFO, &param)) {
Arnaldo Carvalho de Melo6beba7a2009-10-21 17:34:06 -02001363 pr_err("Could not set realtime priority.\n");
David Ahern8d3eca22012-08-26 12:24:47 -06001364 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001365 goto out_child;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001366 }
1367 }
1368
Jiri Olsa774cb492012-11-12 18:34:01 +01001369 /*
1370 * When perf is starting the traced process, all the events
1371 * (apart from group members) have enable_on_exec=1 set,
1372 * so don't spoil it by prematurely enabling them.
1373 */
Andi Kleen6619a532014-01-11 13:38:27 -08001374 if (!target__none(&opts->target) && !opts->initial_delay)
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001375 perf_evlist__enable(rec->evlist);
David Ahern764e16a32011-08-25 10:17:55 -06001376
Peter Zijlstra856e9662009-12-16 17:55:55 +01001377 /*
1378 * Let the child rip
1379 */
Namhyung Kime803cf92015-09-22 09:24:55 +09001380 if (forks) {
Jiri Olsa20a8a3c2018-03-07 16:50:04 +01001381 struct machine *machine = &session->machines.host;
Namhyung Kime5bed562015-09-30 10:45:24 +09001382 union perf_event *event;
Hari Bathinie907caf2017-03-08 02:11:51 +05301383 pid_t tgid;
Namhyung Kime5bed562015-09-30 10:45:24 +09001384
1385 event = malloc(sizeof(event->comm) + machine->id_hdr_size);
1386 if (event == NULL) {
1387 err = -ENOMEM;
1388 goto out_child;
1389 }
1390
Namhyung Kime803cf92015-09-22 09:24:55 +09001391 /*
1392 * Some H/W events are generated before COMM event
1393 * which is emitted during exec(), so perf script
1394 * cannot see a correct process name for those events.
1395 * Synthesize COMM event to prevent it.
1396 */
Hari Bathinie907caf2017-03-08 02:11:51 +05301397 tgid = perf_event__synthesize_comm(tool, event,
1398 rec->evlist->workload.pid,
1399 process_synthesized_event,
1400 machine);
1401 free(event);
1402
1403 if (tgid == -1)
1404 goto out_child;
1405
1406 event = malloc(sizeof(event->namespaces) +
1407 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
1408 machine->id_hdr_size);
1409 if (event == NULL) {
1410 err = -ENOMEM;
1411 goto out_child;
1412 }
1413
1414 /*
1415 * Synthesize NAMESPACES event for the command specified.
1416 */
1417 perf_event__synthesize_namespaces(tool, event,
1418 rec->evlist->workload.pid,
1419 tgid, process_synthesized_event,
1420 machine);
Namhyung Kime5bed562015-09-30 10:45:24 +09001421 free(event);
Namhyung Kime803cf92015-09-22 09:24:55 +09001422
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001423 perf_evlist__start_workload(rec->evlist);
Namhyung Kime803cf92015-09-22 09:24:55 +09001424 }
Peter Zijlstra856e9662009-12-16 17:55:55 +01001425
Andi Kleen6619a532014-01-11 13:38:27 -08001426 if (opts->initial_delay) {
Arnaldo Carvalho de Melo0693e682016-08-08 15:05:46 -03001427 usleep(opts->initial_delay * USEC_PER_MSEC);
Andi Kleen6619a532014-01-11 13:38:27 -08001428 perf_evlist__enable(rec->evlist);
1429 }
1430
Wang Nan5f9cf592016-04-20 18:59:49 +00001431 trigger_ready(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001432 trigger_ready(&switch_output_trigger);
Wang Nana0748652016-11-26 07:03:28 +00001433 perf_hooks__invoke_record_start();
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001434 for (;;) {
Yang Shi9f065192015-09-29 14:49:43 -07001435 unsigned long long hits = rec->samples;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001436
Wang Nan057374642016-07-14 08:34:43 +00001437 /*
1438 * rec->evlist->bkw_mmap_state is possible to be
1439 * BKW_MMAP_EMPTY here: when done == true and
1440 * hits != rec->samples in previous round.
1441 *
1442 * perf_evlist__toggle_bkw_mmap ensure we never
1443 * convert BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
1444 */
1445 if (trigger_is_hit(&switch_output_trigger) || done || draining)
1446 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);
1447
Alexey Budankov470530b2019-03-18 20:40:26 +03001448 if (record__mmap_read_all(rec, false) < 0) {
Wang Nan5f9cf592016-04-20 18:59:49 +00001449 trigger_error(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001450 trigger_error(&switch_output_trigger);
David Ahern8d3eca22012-08-26 12:24:47 -06001451 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001452 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001453 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001454
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001455 if (auxtrace_record__snapshot_started) {
1456 auxtrace_record__snapshot_started = 0;
Wang Nan5f9cf592016-04-20 18:59:49 +00001457 if (!trigger_is_error(&auxtrace_snapshot_trigger))
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001458 record__read_auxtrace_snapshot(rec);
Wang Nan5f9cf592016-04-20 18:59:49 +00001459 if (trigger_is_error(&auxtrace_snapshot_trigger)) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001460 pr_err("AUX area tracing snapshot failed\n");
1461 err = -1;
1462 goto out_child;
1463 }
1464 }
1465
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001466 if (trigger_is_hit(&switch_output_trigger)) {
Wang Nan057374642016-07-14 08:34:43 +00001467 /*
1468 * If switch_output_trigger is hit, the data in
1469 * overwritable ring buffer should have been collected,
1470 * so bkw_mmap_state should be set to BKW_MMAP_EMPTY.
1471 *
1472 * If SIGUSR2 raise after or during record__mmap_read_all(),
1473 * record__mmap_read_all() didn't collect data from
1474 * overwritable ring buffer. Read again.
1475 */
1476 if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
1477 continue;
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001478 trigger_ready(&switch_output_trigger);
1479
Wang Nan057374642016-07-14 08:34:43 +00001480 /*
1481 * Reenable events in overwrite ring buffer after
1482 * record__mmap_read_all(): we should have collected
1483 * data from it.
1484 */
1485 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);
1486
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001487 if (!quiet)
1488 fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
1489 waking);
1490 waking = 0;
1491 fd = record__switch_output(rec, false);
1492 if (fd < 0) {
1493 pr_err("Failed to switch to new file\n");
1494 trigger_error(&switch_output_trigger);
1495 err = fd;
1496 goto out_child;
1497 }
Jiri Olsabfacbe32017-01-09 10:52:00 +01001498
1499 /* re-arm the alarm */
1500 if (rec->switch_output.time)
1501 alarm(rec->switch_output.time);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001502 }
1503
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001504 if (hits == rec->samples) {
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001505 if (done || draining)
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001506 break;
Arnaldo Carvalho de Melof66a8892014-08-18 17:25:59 -03001507 err = perf_evlist__poll(rec->evlist, -1);
Jiri Olsaa5151142014-06-02 13:44:23 -04001508 /*
1509 * Propagate error, only if there's any. Ignore positive
1510 * number of returned events and interrupt error.
1511 */
1512 if (err > 0 || (err < 0 && errno == EINTR))
Namhyung Kim45604712014-05-12 09:47:24 +09001513 err = 0;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001514 waking++;
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001515
1516 if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
1517 draining = true;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001518 }
1519
Jiri Olsa774cb492012-11-12 18:34:01 +01001520 /*
1521 * When perf is starting the traced process, at the end events
1522 * die with the process and we wait for that. Thus no need to
1523 * disable events in this case.
1524 */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001525 if (done && !disabled && !target__none(&opts->target)) {
Wang Nan5f9cf592016-04-20 18:59:49 +00001526 trigger_off(&auxtrace_snapshot_trigger);
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001527 perf_evlist__disable(rec->evlist);
Jiri Olsa27119262012-11-12 18:34:02 +01001528 disabled = true;
1529 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001530 }
Wang Nan5f9cf592016-04-20 18:59:49 +00001531 trigger_off(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001532 trigger_off(&switch_output_trigger);
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001533
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001534 if (forks && workload_exec_errno) {
Masami Hiramatsu35550da2014-08-14 02:22:43 +00001535 char msg[STRERR_BUFSIZE];
Arnaldo Carvalho de Meloc8b5f2c2016-07-06 11:56:20 -03001536 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001537 pr_err("Workload failed: %s\n", emsg);
1538 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001539 goto out_child;
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001540 }
1541
Namhyung Kime3d59112015-01-29 17:06:44 +09001542 if (!quiet)
Namhyung Kim45604712014-05-12 09:47:24 +09001543 fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02001544
Wang Nan4ea648a2016-07-14 08:34:47 +00001545 if (target__none(&rec->opts.target))
1546 record__synthesize_workload(rec, true);
1547
Namhyung Kim45604712014-05-12 09:47:24 +09001548out_child:
Alexey Budankov470530b2019-03-18 20:40:26 +03001549 record__mmap_read_all(rec, true);
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001550 record__aio_mmap_read_sync(rec);
1551
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001552 if (rec->session->bytes_transferred && rec->session->bytes_compressed) {
1553 ratio = (float)rec->session->bytes_transferred/(float)rec->session->bytes_compressed;
1554 session->header.env.comp_ratio = ratio + 0.5;
1555 }
1556
Namhyung Kim45604712014-05-12 09:47:24 +09001557 if (forks) {
1558 int exit_status;
Ingo Molnaraddc2782009-06-02 23:43:11 +02001559
Namhyung Kim45604712014-05-12 09:47:24 +09001560 if (!child_finished)
1561 kill(rec->evlist->workload.pid, SIGTERM);
1562
1563 wait(&exit_status);
1564
1565 if (err < 0)
1566 status = err;
1567 else if (WIFEXITED(exit_status))
1568 status = WEXITSTATUS(exit_status);
1569 else if (WIFSIGNALED(exit_status))
1570 signr = WTERMSIG(exit_status);
1571 } else
1572 status = err;
1573
Wang Nan4ea648a2016-07-14 08:34:47 +00001574 record__synthesize(rec, true);
Namhyung Kime3d59112015-01-29 17:06:44 +09001575 /* this will be recalculated during process_buildids() */
1576 rec->samples = 0;
1577
Wang Nanecfd7a92016-04-13 08:21:07 +00001578 if (!err) {
1579 if (!rec->timestamp_filename) {
1580 record__finish_output(rec);
1581 } else {
1582 fd = record__switch_output(rec, true);
1583 if (fd < 0) {
1584 status = fd;
1585 goto out_delete_session;
1586 }
1587 }
1588 }
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001589
Wang Nana0748652016-11-26 07:03:28 +00001590 perf_hooks__invoke_record_end();
1591
Namhyung Kime3d59112015-01-29 17:06:44 +09001592 if (!err && !quiet) {
1593 char samples[128];
Wang Nanecfd7a92016-04-13 08:21:07 +00001594 const char *postfix = rec->timestamp_filename ?
1595 ".<timestamp>" : "";
Namhyung Kime3d59112015-01-29 17:06:44 +09001596
Adrian Hunteref149c22015-04-09 18:53:45 +03001597 if (rec->samples && !rec->opts.full_auxtrace)
Namhyung Kime3d59112015-01-29 17:06:44 +09001598 scnprintf(samples, sizeof(samples),
1599 " (%" PRIu64 " samples)", rec->samples);
1600 else
1601 samples[0] = '\0';
1602
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001603 fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s",
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001604 perf_data__size(data) / 1024.0 / 1024.0,
Jiri Olsa2d4f2792019-02-21 10:41:30 +01001605 data->path, postfix, samples);
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001606 if (ratio) {
1607 fprintf(stderr, ", compressed (original %.3f MB, ratio is %.3f)",
1608 rec->session->bytes_transferred / 1024.0 / 1024.0,
1609 ratio);
1610 }
1611 fprintf(stderr, " ]\n");
Namhyung Kime3d59112015-01-29 17:06:44 +09001612 }
1613
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001614out_delete_session:
Alexey Budankov5d7f4112019-03-18 20:43:35 +03001615 zstd_fini(&session->zstd_data);
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001616 perf_session__delete(session);
Song Liu657ee552019-03-11 22:30:50 -07001617
1618 if (!opts->no_bpf_event)
1619 perf_evlist__stop_sb_thread(sb_evlist);
Namhyung Kim45604712014-05-12 09:47:24 +09001620 return status;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001621}
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001622
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001623static void callchain_debug(struct callchain_param *callchain)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001624{
Kan Liangaad2b212015-01-05 13:23:04 -05001625 static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
Jiri Olsaa601fdf2014-02-03 12:44:43 +01001626
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001627 pr_debug("callchain: type %s\n", str[callchain->record_mode]);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001628
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001629 if (callchain->record_mode == CALLCHAIN_DWARF)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001630 pr_debug("callchain: stack dump size %d\n",
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001631 callchain->dump_size);
1632}
1633
1634int record_opts__parse_callchain(struct record_opts *record,
1635 struct callchain_param *callchain,
1636 const char *arg, bool unset)
1637{
1638 int ret;
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001639 callchain->enabled = !unset;
1640
1641 /* --no-call-graph */
1642 if (unset) {
1643 callchain->record_mode = CALLCHAIN_NONE;
1644 pr_debug("callchain: disabled\n");
1645 return 0;
1646 }
1647
1648 ret = parse_callchain_record_opt(arg, callchain);
1649 if (!ret) {
1650 /* Enable data address sampling for DWARF unwind. */
1651 if (callchain->record_mode == CALLCHAIN_DWARF)
1652 record->sample_address = true;
1653 callchain_debug(callchain);
1654 }
1655
1656 return ret;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001657}
1658
Kan Liangc421e802015-07-29 05:42:12 -04001659int record_parse_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001660 const char *arg,
1661 int unset)
1662{
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001663 return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
Jiri Olsa26d33022012-08-07 15:20:47 +02001664}
1665
Kan Liangc421e802015-07-29 05:42:12 -04001666int record_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001667 const char *arg __maybe_unused,
1668 int unset __maybe_unused)
1669{
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001670 struct callchain_param *callchain = opt->value;
Kan Liangc421e802015-07-29 05:42:12 -04001671
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001672 callchain->enabled = true;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001673
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001674 if (callchain->record_mode == CALLCHAIN_NONE)
1675 callchain->record_mode = CALLCHAIN_FP;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001676
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001677 callchain_debug(callchain);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001678 return 0;
1679}
1680
Jiri Olsaeb853e82014-02-03 12:44:42 +01001681static int perf_record_config(const char *var, const char *value, void *cb)
1682{
Namhyung Kim7a29c082015-12-15 10:49:56 +09001683 struct record *rec = cb;
1684
1685 if (!strcmp(var, "record.build-id")) {
1686 if (!strcmp(value, "cache"))
1687 rec->no_buildid_cache = false;
1688 else if (!strcmp(value, "no-cache"))
1689 rec->no_buildid_cache = true;
1690 else if (!strcmp(value, "skip"))
1691 rec->no_buildid = true;
1692 else
1693 return -1;
1694 return 0;
1695 }
Yisheng Xiecff17202018-03-12 19:25:57 +08001696 if (!strcmp(var, "record.call-graph")) {
1697 var = "call-graph.record-mode";
1698 return perf_default_config(var, value, cb);
1699 }
Alexey Budankov93f20c02018-11-06 12:07:19 +03001700#ifdef HAVE_AIO_SUPPORT
1701 if (!strcmp(var, "record.aio")) {
1702 rec->opts.nr_cblocks = strtol(value, NULL, 0);
1703 if (!rec->opts.nr_cblocks)
1704 rec->opts.nr_cblocks = nr_cblocks_default;
1705 }
1706#endif
Jiri Olsaeb853e82014-02-03 12:44:42 +01001707
Yisheng Xiecff17202018-03-12 19:25:57 +08001708 return 0;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001709}
1710
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001711struct clockid_map {
1712 const char *name;
1713 int clockid;
1714};
1715
1716#define CLOCKID_MAP(n, c) \
1717 { .name = n, .clockid = (c), }
1718
1719#define CLOCKID_END { .name = NULL, }
1720
1721
1722/*
1723 * Add the missing ones, we need to build on many distros...
1724 */
1725#ifndef CLOCK_MONOTONIC_RAW
1726#define CLOCK_MONOTONIC_RAW 4
1727#endif
1728#ifndef CLOCK_BOOTTIME
1729#define CLOCK_BOOTTIME 7
1730#endif
1731#ifndef CLOCK_TAI
1732#define CLOCK_TAI 11
1733#endif
1734
1735static const struct clockid_map clockids[] = {
1736 /* available for all events, NMI safe */
1737 CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
1738 CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),
1739
1740 /* available for some events */
1741 CLOCKID_MAP("realtime", CLOCK_REALTIME),
1742 CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
1743 CLOCKID_MAP("tai", CLOCK_TAI),
1744
1745 /* available for the lazy */
1746 CLOCKID_MAP("mono", CLOCK_MONOTONIC),
1747 CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
1748 CLOCKID_MAP("real", CLOCK_REALTIME),
1749 CLOCKID_MAP("boot", CLOCK_BOOTTIME),
1750
1751 CLOCKID_END,
1752};
1753
Alexey Budankovcf790512018-10-09 17:36:24 +03001754static int get_clockid_res(clockid_t clk_id, u64 *res_ns)
1755{
1756 struct timespec res;
1757
1758 *res_ns = 0;
1759 if (!clock_getres(clk_id, &res))
1760 *res_ns = res.tv_nsec + res.tv_sec * NSEC_PER_SEC;
1761 else
1762 pr_warning("WARNING: Failed to determine specified clock resolution.\n");
1763
1764 return 0;
1765}
1766
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001767static int parse_clockid(const struct option *opt, const char *str, int unset)
1768{
1769 struct record_opts *opts = (struct record_opts *)opt->value;
1770 const struct clockid_map *cm;
1771 const char *ostr = str;
1772
1773 if (unset) {
1774 opts->use_clockid = 0;
1775 return 0;
1776 }
1777
1778 /* no arg passed */
1779 if (!str)
1780 return 0;
1781
1782 /* no setting it twice */
1783 if (opts->use_clockid)
1784 return -1;
1785
1786 opts->use_clockid = true;
1787
1788 /* if its a number, we're done */
1789 if (sscanf(str, "%d", &opts->clockid) == 1)
Alexey Budankovcf790512018-10-09 17:36:24 +03001790 return get_clockid_res(opts->clockid, &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001791
1792 /* allow a "CLOCK_" prefix to the name */
1793 if (!strncasecmp(str, "CLOCK_", 6))
1794 str += 6;
1795
1796 for (cm = clockids; cm->name; cm++) {
1797 if (!strcasecmp(str, cm->name)) {
1798 opts->clockid = cm->clockid;
Alexey Budankovcf790512018-10-09 17:36:24 +03001799 return get_clockid_res(opts->clockid,
1800 &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001801 }
1802 }
1803
1804 opts->use_clockid = false;
1805 ui__warning("unknown clockid %s, check man page\n", ostr);
1806 return -1;
1807}
1808
Alexey Budankovf4fe11b2019-01-22 20:52:03 +03001809static int record__parse_affinity(const struct option *opt, const char *str, int unset)
1810{
1811 struct record_opts *opts = (struct record_opts *)opt->value;
1812
1813 if (unset || !str)
1814 return 0;
1815
1816 if (!strcasecmp(str, "node"))
1817 opts->affinity = PERF_AFFINITY_NODE;
1818 else if (!strcasecmp(str, "cpu"))
1819 opts->affinity = PERF_AFFINITY_CPU;
1820
1821 return 0;
1822}
1823
Adrian Huntere9db1312015-04-09 18:53:46 +03001824static int record__parse_mmap_pages(const struct option *opt,
1825 const char *str,
1826 int unset __maybe_unused)
1827{
1828 struct record_opts *opts = opt->value;
1829 char *s, *p;
1830 unsigned int mmap_pages;
1831 int ret;
1832
1833 if (!str)
1834 return -EINVAL;
1835
1836 s = strdup(str);
1837 if (!s)
1838 return -ENOMEM;
1839
1840 p = strchr(s, ',');
1841 if (p)
1842 *p = '\0';
1843
1844 if (*s) {
1845 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
1846 if (ret)
1847 goto out_free;
1848 opts->mmap_pages = mmap_pages;
1849 }
1850
1851 if (!p) {
1852 ret = 0;
1853 goto out_free;
1854 }
1855
1856 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
1857 if (ret)
1858 goto out_free;
1859
1860 opts->auxtrace_mmap_pages = mmap_pages;
1861
1862out_free:
1863 free(s);
1864 return ret;
1865}
1866
Jiri Olsa0c582442017-01-09 10:51:59 +01001867static void switch_output_size_warn(struct record *rec)
1868{
1869 u64 wakeup_size = perf_evlist__mmap_size(rec->opts.mmap_pages);
1870 struct switch_output *s = &rec->switch_output;
1871
1872 wakeup_size /= 2;
1873
1874 if (s->size < wakeup_size) {
1875 char buf[100];
1876
1877 unit_number__scnprintf(buf, sizeof(buf), wakeup_size);
1878 pr_warning("WARNING: switch-output data size lower than "
1879 "wakeup kernel buffer size (%s) "
1880 "expect bigger perf.data sizes\n", buf);
1881 }
1882}
1883
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001884static int switch_output_setup(struct record *rec)
1885{
1886 struct switch_output *s = &rec->switch_output;
Jiri Olsadc0c6122017-01-09 10:51:58 +01001887 static struct parse_tag tags_size[] = {
1888 { .tag = 'B', .mult = 1 },
1889 { .tag = 'K', .mult = 1 << 10 },
1890 { .tag = 'M', .mult = 1 << 20 },
1891 { .tag = 'G', .mult = 1 << 30 },
1892 { .tag = 0 },
1893 };
Jiri Olsabfacbe32017-01-09 10:52:00 +01001894 static struct parse_tag tags_time[] = {
1895 { .tag = 's', .mult = 1 },
1896 { .tag = 'm', .mult = 60 },
1897 { .tag = 'h', .mult = 60*60 },
1898 { .tag = 'd', .mult = 60*60*24 },
1899 { .tag = 0 },
1900 };
Jiri Olsadc0c6122017-01-09 10:51:58 +01001901 unsigned long val;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001902
1903 if (!s->set)
1904 return 0;
1905
1906 if (!strcmp(s->str, "signal")) {
1907 s->signal = true;
1908 pr_debug("switch-output with SIGUSR2 signal\n");
Jiri Olsadc0c6122017-01-09 10:51:58 +01001909 goto enabled;
1910 }
1911
1912 val = parse_tag_value(s->str, tags_size);
1913 if (val != (unsigned long) -1) {
1914 s->size = val;
1915 pr_debug("switch-output with %s size threshold\n", s->str);
1916 goto enabled;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001917 }
1918
Jiri Olsabfacbe32017-01-09 10:52:00 +01001919 val = parse_tag_value(s->str, tags_time);
1920 if (val != (unsigned long) -1) {
1921 s->time = val;
1922 pr_debug("switch-output with %s time threshold (%lu seconds)\n",
1923 s->str, s->time);
1924 goto enabled;
1925 }
1926
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001927 return -1;
Jiri Olsadc0c6122017-01-09 10:51:58 +01001928
1929enabled:
1930 rec->timestamp_filename = true;
1931 s->enabled = true;
Jiri Olsa0c582442017-01-09 10:51:59 +01001932
1933 if (s->size && !rec->opts.no_buffering)
1934 switch_output_size_warn(rec);
1935
Jiri Olsadc0c6122017-01-09 10:51:58 +01001936 return 0;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001937}
1938
Namhyung Kime5b2c202014-10-23 00:15:46 +09001939static const char * const __record_usage[] = {
Mike Galbraith9e0967532009-05-28 16:25:34 +02001940 "perf record [<options>] [<command>]",
1941 "perf record [<options>] -- <command> [<options>]",
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001942 NULL
1943};
Namhyung Kime5b2c202014-10-23 00:15:46 +09001944const char * const *record_usage = __record_usage;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001945
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001946/*
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001947 * XXX Ideally would be local to cmd_record() and passed to a record__new
1948 * because we need to have access to it in record__exit, that is called
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001949 * after cmd_record() exits, but since record_options need to be accessible to
1950 * builtin-script, leave it here.
1951 *
1952 * At least we don't ouch it in all the other functions here directly.
1953 *
1954 * Just say no to tons of global variables, sigh.
1955 */
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001956static struct record record = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001957 .opts = {
Andi Kleen8affc2b2014-07-31 14:45:04 +08001958 .sample_time = true,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001959 .mmap_pages = UINT_MAX,
1960 .user_freq = UINT_MAX,
1961 .user_interval = ULLONG_MAX,
Arnaldo Carvalho de Melo447a6012012-05-22 13:14:18 -03001962 .freq = 4000,
Namhyung Kimd1cb9fc2012-05-16 18:45:49 +09001963 .target = {
1964 .uses_mmap = true,
Adrian Hunter3aa59392013-11-15 15:52:29 +02001965 .default_per_cpu = true,
Namhyung Kimd1cb9fc2012-05-16 18:45:49 +09001966 },
Alexey Budankov470530b2019-03-18 20:40:26 +03001967 .mmap_flush = MMAP_FLUSH_DEFAULT,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001968 },
Namhyung Kime3d59112015-01-29 17:06:44 +09001969 .tool = {
1970 .sample = process_sample_event,
1971 .fork = perf_event__process_fork,
Adrian Huntercca84822015-08-19 17:29:21 +03001972 .exit = perf_event__process_exit,
Namhyung Kime3d59112015-01-29 17:06:44 +09001973 .comm = perf_event__process_comm,
Hari Bathinif3b36142017-03-08 02:11:43 +05301974 .namespaces = perf_event__process_namespaces,
Namhyung Kime3d59112015-01-29 17:06:44 +09001975 .mmap = perf_event__process_mmap,
1976 .mmap2 = perf_event__process_mmap2,
Adrian Huntercca84822015-08-19 17:29:21 +03001977 .ordered_events = true,
Namhyung Kime3d59112015-01-29 17:06:44 +09001978 },
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001979};
Frederic Weisbecker7865e812010-04-14 19:42:07 +02001980
Namhyung Kim76a26542015-10-22 23:28:32 +09001981const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
1982 "\n\t\t\t\tDefault: fp";
Arnaldo Carvalho de Melo61eaa3b2012-10-01 15:20:58 -03001983
Wang Nan0aab2132016-06-16 08:02:41 +00001984static bool dry_run;
1985
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001986/*
1987 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
1988 * with it and switch to use the library functions in perf_evlist that came
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03001989 * from builtin-record.c, i.e. use record_opts,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001990 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
1991 * using pipes, etc.
1992 */
Jiri Olsaefd21302017-01-03 09:19:55 +01001993static struct option __record_options[] = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001994 OPT_CALLBACK('e', "event", &record.evlist, "event",
Thomas Gleixner86847b62009-06-06 12:24:17 +02001995 "event selector. use 'perf list' to list available events",
Jiri Olsaf120f9d2011-07-14 11:25:32 +02001996 parse_events_option),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001997 OPT_CALLBACK(0, "filter", &record.evlist, "filter",
Li Zefanc171b552009-10-15 11:22:07 +08001998 "event filter", parse_filter),
Wang Nan4ba1faa2015-07-10 07:36:10 +00001999 OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
2000 NULL, "don't record events from perf itself",
2001 exclude_perf),
Namhyung Kimbea03402012-04-26 14:15:15 +09002002 OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03002003 "record events on existing process id"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002004 OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03002005 "record events on existing thread id"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002006 OPT_INTEGER('r', "realtime", &record.realtime_prio,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002007 "collect data with this RT SCHED_FIFO priority"),
Arnaldo Carvalho de Melo509051e2014-01-14 17:52:14 -03002008 OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
Kirill Smelkovacac03f2011-01-12 17:59:36 +03002009 "collect data without buffering"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002010 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
Frederic Weisbeckerdaac07b2009-08-13 10:27:19 +02002011 "collect raw sample records from all opened counters"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002012 OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002013 "system-wide collection from all CPUs"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002014 OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
Stephane Eranianc45c6ea2010-05-28 12:00:01 +02002015 "list of cpus to monitor"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002016 OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
Jiri Olsa2d4f2792019-02-21 10:41:30 +01002017 OPT_STRING('o', "output", &record.data.path, "file",
Ingo Molnarabaff322009-06-02 22:59:57 +02002018 "output file name"),
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02002019 OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
2020 &record.opts.no_inherit_set,
2021 "child tasks do not inherit counters"),
Wang Nan4ea648a2016-07-14 08:34:47 +00002022 OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
2023 "synthesize non-sample events at the end of output"),
Wang Nan626a6b72016-07-14 08:34:45 +00002024 OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
Song Liu71184c62019-03-11 22:30:37 -07002025 OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "record bpf events"),
Arnaldo Carvalho de Melob09c2362018-03-01 14:52:50 -03002026 OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
2027 "Fail if the specified frequency can't be used"),
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03002028 OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
2029 "profile at this frequency",
2030 record__parse_freq),
Adrian Huntere9db1312015-04-09 18:53:46 +03002031 OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
2032 "number of mmap data pages and AUX area tracing mmap pages",
2033 record__parse_mmap_pages),
Alexey Budankov470530b2019-03-18 20:40:26 +03002034 OPT_CALLBACK(0, "mmap-flush", &record.opts, "number",
2035 "Minimal number of bytes that is extracted from mmap data pages (default: 1)",
2036 record__mmap_flush_parse),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002037 OPT_BOOLEAN(0, "group", &record.opts.group,
Lin Ming43bece72011-08-17 18:42:07 +08002038 "put the counters into a counter group"),
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03002039 OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002040 NULL, "enables call-graph recording" ,
2041 &record_callchain_opt),
2042 OPT_CALLBACK(0, "call-graph", &record.opts,
Namhyung Kim76a26542015-10-22 23:28:32 +09002043 "record_mode[,record_size]", record_callchain_help,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002044 &record_parse_callchain_opt),
Ian Munsiec0555642010-04-13 18:37:33 +10002045 OPT_INCR('v', "verbose", &verbose,
Ingo Molnar3da297a2009-06-07 17:39:02 +02002046 "be more verbose (show counter open errors, etc)"),
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02002047 OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002048 OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02002049 "per thread counts"),
Peter Zijlstra56100322015-06-10 16:48:50 +02002050 OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
Kan Liang3b0a5da2017-08-29 13:11:08 -04002051 OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
2052 "Record the sample physical addresses"),
Jiri Olsab6f35ed2016-08-01 20:02:35 +02002053 OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
Adrian Hunter3abebc52015-07-06 14:51:01 +03002054 OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
2055 &record.opts.sample_time_set,
2056 "Record the sample timestamps"),
Jiri Olsaf290aa12018-02-01 09:38:11 +01002057 OPT_BOOLEAN_SET('P', "period", &record.opts.period, &record.opts.period_set,
2058 "Record the sample period"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002059 OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02002060 "don't sample"),
Wang Nand2db9a92016-01-25 09:56:19 +00002061 OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
2062 &record.no_buildid_cache_set,
2063 "do not update the buildid cache"),
2064 OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
2065 &record.no_buildid_set,
2066 "do not collect buildids in perf.data"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002067 OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
Stephane Eranian023695d2011-02-14 11:20:01 +02002068 "monitor event in cgroup name only",
2069 parse_cgroups),
Arnaldo Carvalho de Meloa6205a32014-01-14 17:58:12 -03002070 OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
Andi Kleen6619a532014-01-11 13:38:27 -08002071 "ms to wait before starting measurement after program start"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002072 OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
2073 "user to profile"),
Stephane Eraniana5aabda2012-03-08 23:47:45 +01002074
2075 OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
2076 "branch any", "sample any taken branches",
2077 parse_branch_stack),
2078
2079 OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
2080 "branch filter mask", "branch stack filter modes",
Roberto Agostino Vitillobdfebd82012-02-09 23:21:02 +01002081 parse_branch_stack),
Andi Kleen05484292013-01-24 16:10:29 +01002082 OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
2083 "sample by weight (on special events only)"),
Andi Kleen475eeab2013-09-20 07:40:43 -07002084 OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
2085 "sample transaction flags (special events only)"),
Adrian Hunter3aa59392013-11-15 15:52:29 +02002086 OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
2087 "use per-thread mmaps"),
Stephane Eranianbcc84ec2015-08-31 18:41:12 +02002088 OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
2089 "sample selected machine registers on interrupt,"
Arnaldo Carvalho de Melo8e5bc762019-05-13 15:55:01 -03002090 " use '-I?' to list register names", parse_regs),
Andi Kleen84c41742017-09-05 10:00:28 -07002091 OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
2092 "sample selected machine registers on interrupt,"
Arnaldo Carvalho de Melo8e5bc762019-05-13 15:55:01 -03002093 " use '-I?' to list register names", parse_regs),
Andi Kleen85c273d2015-02-24 15:13:40 -08002094 OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
2095 "Record running/enabled time of read (:S) events"),
Peter Zijlstra814c8c32015-03-31 00:19:31 +02002096 OPT_CALLBACK('k', "clockid", &record.opts,
2097 "clockid", "clockid to use for events, see clock_gettime()",
2098 parse_clockid),
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002099 OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
2100 "opts", "AUX area tracing Snapshot Mode", ""),
Mark Drayton3fcb10e2018-12-04 12:34:20 -08002101 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
Kan Liang9d9cad72015-06-17 09:51:11 -04002102 "per thread proc mmap processing timeout in ms"),
Hari Bathinif3b36142017-03-08 02:11:43 +05302103 OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
2104 "Record namespaces events"),
Adrian Hunterb757bb02015-07-21 12:44:04 +03002105 OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
2106 "Record context switch events"),
Jiri Olsa85723882016-02-15 09:34:31 +01002107 OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
2108 "Configure all used events to run in kernel space.",
2109 PARSE_OPT_EXCLUSIVE),
2110 OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
2111 "Configure all used events to run in user space.",
2112 PARSE_OPT_EXCLUSIVE),
Wang Nan71dc23262015-10-14 12:41:19 +00002113 OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
2114 "clang binary to use for compiling BPF scriptlets"),
2115 OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
2116 "options passed to clang when compiling BPF scriptlets"),
He Kuang7efe0e02015-12-14 10:39:23 +00002117 OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
2118 "file", "vmlinux pathname"),
Namhyung Kim61566812016-01-11 22:37:09 +09002119 OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
2120 "Record build-id of all DSOs regardless of hits"),
Wang Nanecfd7a92016-04-13 08:21:07 +00002121 OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
2122 "append timestamp to output filename"),
Jin Yao68588ba2017-12-08 21:13:42 +08002123 OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
2124 "Record timestamp boundary (time of first/last samples)"),
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002125 OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
Andi Kleenc38dab72019-03-14 15:49:56 -07002126 &record.switch_output.set, "signal or size[BKMG] or time[smhd]",
2127 "Switch output when receiving SIGUSR2 (signal) or cross a size or time threshold",
Jiri Olsadc0c6122017-01-09 10:51:58 +01002128 "signal"),
Andi Kleen03724b22019-03-14 15:49:55 -07002129 OPT_INTEGER(0, "switch-max-files", &record.switch_output.num_files,
2130 "Limit number of switch output generated files"),
Wang Nan0aab2132016-06-16 08:02:41 +00002131 OPT_BOOLEAN(0, "dry-run", &dry_run,
2132 "Parse options then exit"),
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002133#ifdef HAVE_AIO_SUPPORT
Alexey Budankov93f20c02018-11-06 12:07:19 +03002134 OPT_CALLBACK_OPTARG(0, "aio", &record.opts,
2135 &nr_cblocks_default, "n", "Use <n> control blocks in asynchronous trace writing mode (default: 1, max: 4)",
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002136 record__aio_parse),
2137#endif
Alexey Budankovf4fe11b2019-01-22 20:52:03 +03002138 OPT_CALLBACK(0, "affinity", &record.opts, "node|cpu",
2139 "Set affinity mask of trace reading thread to NUMA node cpu mask or cpu of processed mmap buffer",
2140 record__parse_affinity),
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002141 OPT_END()
2142};
2143
Namhyung Kime5b2c202014-10-23 00:15:46 +09002144struct option *record_options = __record_options;
2145
Arnaldo Carvalho de Melob0ad8ea2017-03-27 11:47:20 -03002146int cmd_record(int argc, const char **argv)
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002147{
Adrian Hunteref149c22015-04-09 18:53:45 +03002148 int err;
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03002149 struct record *rec = &record;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002150 char errbuf[BUFSIZ];
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002151
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03002152 setlocale(LC_ALL, "");
2153
Wang Nan48e1cab2015-12-14 10:39:22 +00002154#ifndef HAVE_LIBBPF_SUPPORT
2155# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
2156 set_nobuild('\0', "clang-path", true);
2157 set_nobuild('\0', "clang-opt", true);
2158# undef set_nobuild
2159#endif
2160
He Kuang7efe0e02015-12-14 10:39:23 +00002161#ifndef HAVE_BPF_PROLOGUE
2162# if !defined (HAVE_DWARF_SUPPORT)
2163# define REASON "NO_DWARF=1"
2164# elif !defined (HAVE_LIBBPF_SUPPORT)
2165# define REASON "NO_LIBBPF=1"
2166# else
2167# define REASON "this architecture doesn't support BPF prologue"
2168# endif
2169# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
2170 set_nobuild('\0', "vmlinux", true);
2171# undef set_nobuild
2172# undef REASON
2173#endif
2174
Alexey Budankov9d2ed642019-01-22 20:47:43 +03002175 CPU_ZERO(&rec->affinity_mask);
2176 rec->opts.affinity = PERF_AFFINITY_SYS;
2177
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03002178 rec->evlist = perf_evlist__new();
2179 if (rec->evlist == NULL)
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -02002180 return -ENOMEM;
2181
Arnaldo Carvalho de Meloecc4c562017-01-24 13:44:10 -03002182 err = perf_config(perf_record_config, rec);
2183 if (err)
2184 return err;
Jiri Olsaeb853e82014-02-03 12:44:42 +01002185
Tom Zanussibca647a2010-11-10 08:11:30 -06002186 argc = parse_options(argc, argv, record_options, record_usage,
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02002187 PARSE_OPT_STOP_AT_NON_OPTION);
Namhyung Kim68ba3232017-02-17 17:17:42 +09002188 if (quiet)
2189 perf_quiet_option();
Jiri Olsa483635a2017-02-17 18:00:18 +01002190
2191 /* Make system wide (-a) the default target. */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002192 if (!argc && target__none(&rec->opts.target))
Jiri Olsa483635a2017-02-17 18:00:18 +01002193 rec->opts.target.system_wide = true;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002194
Namhyung Kimbea03402012-04-26 14:15:15 +09002195 if (nr_cgroups && !rec->opts.target.system_wide) {
Namhyung Kimc7118362015-10-25 00:49:27 +09002196 usage_with_options_msg(record_usage, record_options,
2197 "cgroup monitoring only available in system-wide mode");
2198
Stephane Eranian023695d2011-02-14 11:20:01 +02002199 }
Adrian Hunterb757bb02015-07-21 12:44:04 +03002200 if (rec->opts.record_switch_events &&
2201 !perf_can_record_switch_events()) {
Namhyung Kimc7118362015-10-25 00:49:27 +09002202 ui__error("kernel does not support recording context switch events\n");
2203 parse_options_usage(record_usage, record_options, "switch-events", 0);
2204 return -EINVAL;
Adrian Hunterb757bb02015-07-21 12:44:04 +03002205 }
Stephane Eranian023695d2011-02-14 11:20:01 +02002206
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002207 if (switch_output_setup(rec)) {
2208 parse_options_usage(record_usage, record_options, "switch-output", 0);
2209 return -EINVAL;
2210 }
2211
Jiri Olsabfacbe32017-01-09 10:52:00 +01002212 if (rec->switch_output.time) {
2213 signal(SIGALRM, alarm_sig_handler);
2214 alarm(rec->switch_output.time);
2215 }
2216
Andi Kleen03724b22019-03-14 15:49:55 -07002217 if (rec->switch_output.num_files) {
2218 rec->switch_output.filenames = calloc(sizeof(char *),
2219 rec->switch_output.num_files);
2220 if (!rec->switch_output.filenames)
2221 return -EINVAL;
2222 }
2223
Adrian Hunter1b36c032016-09-23 17:38:39 +03002224 /*
2225 * Allow aliases to facilitate the lookup of symbols for address
2226 * filters. Refer to auxtrace_parse_filters().
2227 */
2228 symbol_conf.allow_aliases = true;
2229
2230 symbol__init(NULL);
2231
Adrian Hunter4b5ea3b2018-03-06 11:13:12 +02002232 err = record__auxtrace_init(rec);
Adrian Hunter1b36c032016-09-23 17:38:39 +03002233 if (err)
2234 goto out;
2235
Wang Nan0aab2132016-06-16 08:02:41 +00002236 if (dry_run)
Adrian Hunter5c01ad602016-09-23 17:38:37 +03002237 goto out;
Wang Nan0aab2132016-06-16 08:02:41 +00002238
Wang Nand7888572016-04-08 15:07:24 +00002239 err = bpf__setup_stdout(rec->evlist);
2240 if (err) {
2241 bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
2242 pr_err("ERROR: Setup BPF stdout failed: %s\n",
2243 errbuf);
Adrian Hunter5c01ad602016-09-23 17:38:37 +03002244 goto out;
Wang Nand7888572016-04-08 15:07:24 +00002245 }
2246
Adrian Hunteref149c22015-04-09 18:53:45 +03002247 err = -ENOMEM;
2248
Arnaldo Carvalho de Melo6c443952017-11-14 11:03:19 -03002249 if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(rec->evlist))
Arnaldo Carvalho de Melo646aaea2011-05-27 11:00:41 -03002250 pr_warning(
2251"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
2252"check /proc/sys/kernel/kptr_restrict.\n\n"
2253"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
2254"file is not found in the buildid cache or in the vmlinux path.\n\n"
2255"Samples in kernel modules won't be resolved at all.\n\n"
2256"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
2257"even with a suitable vmlinux or kallsyms file.\n\n");
Arnaldo Carvalho de Meloec80fde2011-05-26 09:53:51 -03002258
Wang Nan0c1d46a2016-04-20 18:59:52 +00002259 if (rec->no_buildid_cache || rec->no_buildid) {
Stephane Eraniana1ac1d32010-06-17 11:39:01 +02002260 disable_buildid_cache();
Jiri Olsadc0c6122017-01-09 10:51:58 +01002261 } else if (rec->switch_output.enabled) {
Wang Nan0c1d46a2016-04-20 18:59:52 +00002262 /*
2263 * In 'perf record --switch-output', disable buildid
2264 * generation by default to reduce data file switching
2265 * overhead. Still generate buildid if they are required
2266 * explicitly using
2267 *
Jiri Olsa60437ac2017-01-03 09:19:56 +01002268 * perf record --switch-output --no-no-buildid \
Wang Nan0c1d46a2016-04-20 18:59:52 +00002269 * --no-no-buildid-cache
2270 *
2271 * Following code equals to:
2272 *
2273 * if ((rec->no_buildid || !rec->no_buildid_set) &&
2274 * (rec->no_buildid_cache || !rec->no_buildid_cache_set))
2275 * disable_buildid_cache();
2276 */
2277 bool disable = true;
2278
2279 if (rec->no_buildid_set && !rec->no_buildid)
2280 disable = false;
2281 if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
2282 disable = false;
2283 if (disable) {
2284 rec->no_buildid = true;
2285 rec->no_buildid_cache = true;
2286 disable_buildid_cache();
2287 }
2288 }
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02002289
Wang Nan4ea648a2016-07-14 08:34:47 +00002290 if (record.opts.overwrite)
2291 record.opts.tail_synthesize = true;
2292
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03002293 if (rec->evlist->nr_entries == 0 &&
Arnaldo Carvalho de Melo4b4cd502017-07-03 13:26:32 -03002294 __perf_evlist__add_default(rec->evlist, !record.opts.no_samples) < 0) {
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02002295 pr_err("Not enough memory for event selector list\n");
Adrian Hunter394c01e2016-09-23 17:38:36 +03002296 goto out;
Peter Zijlstrabbd36e52009-06-11 23:11:50 +02002297 }
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002298
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02002299 if (rec->opts.target.tid && !rec->opts.no_inherit_set)
2300 rec->opts.no_inherit = true;
2301
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002302 err = target__validate(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002303 if (err) {
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002304 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Jiri Olsac3dec272018-02-06 19:17:58 +01002305 ui__warning("%s\n", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002306 }
Namhyung Kim4bd0f2d2012-04-26 14:15:18 +09002307
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002308 err = target__parse_uid(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002309 if (err) {
2310 int saved_errno = errno;
2311
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002312 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Namhyung Kim3780f482012-05-29 13:22:57 +09002313 ui__error("%s", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002314
2315 err = -saved_errno;
Adrian Hunter394c01e2016-09-23 17:38:36 +03002316 goto out;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002317 }
Arnaldo Carvalho de Melo0d37aa32012-01-19 14:08:15 -02002318
Mengting Zhangca800062017-12-13 15:01:53 +08002319 /* Enable ignoring missing threads when -u/-p option is defined. */
2320 rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;
Jiri Olsa23dc4f12016-12-12 11:35:43 +01002321
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002322 err = -ENOMEM;
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03002323 if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
Arnaldo Carvalho de Melodd7927f2011-01-12 14:28:51 -02002324 usage_with_options(record_usage, record_options);
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02002325
Adrian Hunteref149c22015-04-09 18:53:45 +03002326 err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
2327 if (err)
Adrian Hunter394c01e2016-09-23 17:38:36 +03002328 goto out;
Adrian Hunteref149c22015-04-09 18:53:45 +03002329
Namhyung Kim61566812016-01-11 22:37:09 +09002330 /*
2331 * We take all buildids when the file contains
2332 * AUX area tracing data because we do not decode the
2333 * trace because it would take too long.
2334 */
2335 if (rec->opts.full_auxtrace)
2336 rec->buildid_all = true;
2337
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03002338 if (record_opts__config(&rec->opts)) {
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03002339 err = -EINVAL;
Adrian Hunter394c01e2016-09-23 17:38:36 +03002340 goto out;
Mike Galbraith7e4ff9e2009-10-12 07:56:03 +02002341 }
2342
Alexey Budankov93f20c02018-11-06 12:07:19 +03002343 if (rec->opts.nr_cblocks > nr_cblocks_max)
2344 rec->opts.nr_cblocks = nr_cblocks_max;
Alexey Budankov5d7f4112019-03-18 20:43:35 +03002345 pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks);
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002346
Alexey Budankov9d2ed642019-01-22 20:47:43 +03002347 pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
Alexey Budankov470530b2019-03-18 20:40:26 +03002348 pr_debug("mmap flush: %d\n", rec->opts.mmap_flush);
Alexey Budankov9d2ed642019-01-22 20:47:43 +03002349
Alexey Budankov51255a82019-03-18 20:42:19 +03002350 if (rec->opts.comp_level > comp_level_max)
2351 rec->opts.comp_level = comp_level_max;
2352 pr_debug("comp level: %d\n", rec->opts.comp_level);
2353
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002354 err = __cmd_record(&record, argc, argv);
Adrian Hunter394c01e2016-09-23 17:38:36 +03002355out:
Namhyung Kim45604712014-05-12 09:47:24 +09002356 perf_evlist__delete(rec->evlist);
Arnaldo Carvalho de Melod65a4582010-07-30 18:31:28 -03002357 symbol__exit();
Adrian Hunteref149c22015-04-09 18:53:45 +03002358 auxtrace_record__free(rec->itr);
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03002359 return err;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002360}
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002361
2362static void snapshot_sig_handler(int sig __maybe_unused)
2363{
Jiri Olsadc0c6122017-01-09 10:51:58 +01002364 struct record *rec = &record;
2365
Wang Nan5f9cf592016-04-20 18:59:49 +00002366 if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
2367 trigger_hit(&auxtrace_snapshot_trigger);
2368 auxtrace_record__snapshot_started = 1;
2369 if (auxtrace_record__snapshot_start(record.itr))
2370 trigger_error(&auxtrace_snapshot_trigger);
2371 }
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002372
Jiri Olsadc0c6122017-01-09 10:51:58 +01002373 if (switch_output_signal(rec))
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002374 trigger_hit(&switch_output_trigger);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002375}
Jiri Olsabfacbe32017-01-09 10:52:00 +01002376
2377static void alarm_sig_handler(int sig __maybe_unused)
2378{
2379 struct record *rec = &record;
2380
2381 if (switch_output_time(rec))
2382 trigger_hit(&switch_output_trigger);
2383}