/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "perf.h"

#include "util/build-id.h"
#include "util/util.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/config.h"

#include "util/callchain.h"
#include "util/cgroup.h"
#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/drv_configs.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"
#include "util/perf_regs.h"
#include "util/auxtrace.h"
#include "util/tsc.h"
#include "util/parse-branch-options.h"
#include "util/parse-regs-options.h"
#include "util/llvm-utils.h"
#include "util/bpf-loader.h"
#include "util/trigger.h"
#include "util/perf-hooks.h"
#include "util/time-utils.h"
#include "util/units.h"
#include "asm/bug.h"

#include <errno.h>
#include <inttypes.h>
#include <poll.h>
#include <unistd.h>
#include <sched.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <asm/bug.h>
#include <linux/time64.h>

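/*
 * State for --switch-output: when the signal, size or time condition is met
 * (see switch_output_setup() below), the current perf.data file is closed
 * and recording continues into a fresh one.
 */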
struct switch_output {
	bool		 enabled;
	bool		 signal;
	unsigned long	 size;
	unsigned long	 time;
	const char	*str;
	bool		 set;
};

struct record {
	struct perf_tool	tool;
	struct record_opts	opts;
	u64			bytes_written;
	struct perf_data	data;
	struct auxtrace_record	*itr;
	struct perf_evlist	*evlist;
	struct perf_session	*session;
	const char		*progname;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_set;
	bool			no_buildid_cache;
	bool			no_buildid_cache_set;
	bool			buildid_all;
	bool			timestamp_filename;
	struct switch_output	switch_output;
	unsigned long long	samples;
};

static volatile int auxtrace_record__snapshot_started;
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
static DEFINE_TRIGGER(switch_output_trigger);

static bool switch_output_signal(struct record *rec)
{
	return rec->switch_output.signal &&
	       trigger_is_ready(&switch_output_trigger);
}

static bool switch_output_size(struct record *rec)
{
	return rec->switch_output.size &&
	       trigger_is_ready(&switch_output_trigger) &&
	       (rec->bytes_written >= rec->switch_output.size);
}

static bool switch_output_time(struct record *rec)
{
	return rec->switch_output.time &&
	       trigger_is_ready(&switch_output_trigger);
}

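/*
 * Write a block of event data to the output file, account for its size and,
 * if a --switch-output size threshold is configured and has been crossed,
 * arm the trigger that rotates the output file.
 */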
static int record__write(struct record *rec, void *bf, size_t size)
{
	if (perf_data__write(rec->session->data, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	rec->bytes_written += size;

	if (switch_output_size(rec))
		trigger_hit(&switch_output_trigger);

	return 0;
}

static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, event, event->header.size);
}

static int record__pushfn(void *to, void *bf, size_t size)
{
	struct record *rec = to;

	rec->samples++;
	return record__write(rec, bf, size);
}

static volatile int done;
static volatile int signr = -1;
static volatile int child_finished;

static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
}

static void sigsegv_handler(int sig)
{
	perf_hooks__recover();
	sighandler_dump_stack(sig);
}

static void record__sig_exit(void)
{
	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	raise(signr);
}

#ifdef HAVE_AUXTRACE_SUPPORT

static int record__process_auxtrace(struct perf_tool *tool,
				    union perf_event *event, void *data1,
				    size_t len1, void *data2, size_t len2)
{
	struct record *rec = container_of(tool, struct record, tool);
	struct perf_data *data = &rec->data;
	size_t padding;
	u8 pad[8] = {0};

	if (!perf_data__is_pipe(data)) {
		off_t file_offset;
		int fd = perf_data__fd(data);
		int err;

		file_offset = lseek(fd, 0, SEEK_CUR);
		if (file_offset == -1)
			return -1;
		err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
						     event, file_offset);
		if (err)
			return err;
	}

	/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
	padding = (len1 + len2) & 7;
	if (padding)
		padding = 8 - padding;

	record__write(rec, event, event->header.size);
	record__write(rec, data1, len1);
	if (len2)
		record__write(rec, data2, len2);
	record__write(rec, &pad, padding);

	return 0;
}

static int record__auxtrace_mmap_read(struct record *rec,
				      struct auxtrace_mmap *mm)
{
	int ret;

	ret = auxtrace_mmap__read(mm, rec->itr, &rec->tool,
				  record__process_auxtrace);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_mmap_read_snapshot(struct record *rec,
					       struct auxtrace_mmap *mm)
{
	int ret;

	ret = auxtrace_mmap__read_snapshot(mm, rec->itr, &rec->tool,
					   record__process_auxtrace,
					   rec->opts.auxtrace_snapshot_size);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_read_snapshot_all(struct record *rec)
{
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		struct auxtrace_mmap *mm =
				&rec->evlist->mmap[i].auxtrace_mmap;

		if (!mm->base)
			continue;

		if (record__auxtrace_mmap_read_snapshot(rec, mm) != 0) {
			rc = -1;
			goto out;
		}
	}
out:
	return rc;
}

static void record__read_auxtrace_snapshot(struct record *rec)
{
	pr_debug("Recording AUX area tracing snapshot\n");
	if (record__auxtrace_read_snapshot_all(rec) < 0) {
		trigger_error(&auxtrace_snapshot_trigger);
	} else {
		if (auxtrace_record__snapshot_finish(rec->itr))
			trigger_error(&auxtrace_snapshot_trigger);
		else
			trigger_ready(&auxtrace_snapshot_trigger);
	}
}

#else

static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
			       struct auxtrace_mmap *mm __maybe_unused)
{
	return 0;
}

static inline
void record__read_auxtrace_snapshot(struct record *rec __maybe_unused)
{
}

static inline
int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
{
	return 0;
}

#endif

static int record__mmap_evlist(struct record *rec,
			       struct perf_evlist *evlist)
{
	struct record_opts *opts = &rec->opts;
	char msg[512];

	if (perf_evlist__mmap_ex(evlist, opts->mmap_pages, false,
				 opts->auxtrace_mmap_pages,
				 opts->auxtrace_snapshot_mode) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u,%u)\n",
			       opts->mmap_pages, opts->auxtrace_mmap_pages);
			return -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno,
				str_error_r(errno, msg, sizeof(msg)));
			if (errno)
				return -errno;
			else
				return -EINVAL;
		}
	}
	return 0;
}

static int record__mmap(struct record *rec)
{
	return record__mmap_evlist(rec, rec->evlist);
}

static int record__open(struct record *rec)
{
	char msg[BUFSIZ];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	struct perf_evsel_config_term *err_term;
	int rc = 0;

	perf_evlist__config(evlist, opts, &callchain_param);

	evlist__for_each_entry(evlist, pos) {
try_again:
		if (perf_evsel__open(pos, pos->cpus, pos->threads) < 0) {
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose > 0)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}
	}

	if (perf_evlist__apply_filters(evlist, &pos)) {
		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
			pos->filter, perf_evsel__name(pos), errno,
			str_error_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	if (perf_evlist__apply_drv_configs(evlist, &pos, &err_term)) {
		pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
		       err_term->val.drv_cfg, perf_evsel__name(pos), errno,
		       str_error_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	rc = record__mmap(rec);
	if (rc)
		goto out;

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	struct record *rec = container_of(tool, struct record, tool);

	rec->samples++;

	return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
}

static int process_buildids(struct record *rec)
{
	struct perf_data *data = &rec->data;
	struct perf_session *session = rec->session;

	if (data->size == 0)
		return 0;

	/*
	 * During this process, it'll load kernel map and replace the
	 * dso->long_name to a real pathname it found.  In this case
	 * we prefer the vmlinux path like
	 *   /lib/modules/3.16.4/build/vmlinux
	 *
	 * rather than build-id path (in debug directory).
	 *   $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
	 */
	symbol_conf.ignore_vmlinux_buildid = true;

	/*
	 * If --buildid-all is given, it marks all DSO regardless of hits,
	 * so no need to process samples.
	 */
	if (rec->buildid_all)
		rec->tool.sample = NULL;

	return perf_session__process_events(session);
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * As for the guest kernel, when processing the record & report
	 * subcommands we arrange the module mmaps prior to the guest kernel
	 * mmap and trigger a dso preload, because the default guest module
	 * symbols are loaded from guest kallsyms instead of
	 * /lib/modules/XXX/XXX. This avoids missing symbols when the first
	 * address is in a module instead of in the guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for the guest kernel because the guest kernel's
	 * /proc/kallsyms sometimes has no _text.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

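/*
 * Drain the mmap'ed ring buffers of an evlist into the output file. With
 * backward == true only the overwritable (backward) maps are read, and only
 * while their state is BKW_MMAP_DATA_PENDING.
 */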
static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
				    bool backward)
{
	u64 bytes_written = rec->bytes_written;
	int i;
	int rc = 0;
	struct perf_mmap *maps;

	if (!evlist)
		return 0;

	maps = backward ? evlist->backward_mmap : evlist->mmap;
	if (!maps)
		return 0;

	if (backward && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
		return 0;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		struct auxtrace_mmap *mm = &maps[i].auxtrace_mmap;

		if (maps[i].base) {
			if (perf_mmap__push(&maps[i], evlist->overwrite, backward, rec, record__pushfn) != 0) {
				rc = -1;
				goto out;
			}
		}

		if (mm->base && !rec->opts.auxtrace_snapshot_mode &&
		    record__auxtrace_mmap_read(rec, mm) != 0) {
			rc = -1;
			goto out;
		}
	}

	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));

	if (backward)
		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
out:
	return rc;
}

static int record__mmap_read_all(struct record *rec)
{
	int err;

	err = record__mmap_read_evlist(rec, rec->evlist, false);
	if (err)
		return err;

	return record__mmap_read_evlist(rec, rec->evlist, true);
}

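/*
 * Start from all perf.data header features enabled, then clear the ones that
 * this session will not provide (build ids, tracing data, branch stacks, etc.).
 */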
static void record__init_features(struct record *rec)
{
	struct perf_session *session = rec->session;
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&rec->evlist->entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);

	if (!rec->opts.full_auxtrace)
		perf_header__clear_feat(&session->header, HEADER_AUXTRACE);

	perf_header__clear_feat(&session->header, HEADER_STAT);
}

static void
record__finish_output(struct record *rec)
{
	struct perf_data *data = &rec->data;
	int fd = perf_data__fd(data);

	if (data->is_pipe)
		return;

	rec->session->header.data_size += rec->bytes_written;
	data->size = lseek(perf_data__fd(data), 0, SEEK_CUR);

	if (!rec->no_buildid) {
		process_buildids(rec);

		if (rec->buildid_all)
			dsos__hit_all(rec->session);
	}
	perf_session__write_header(rec->session, rec->evlist, fd, true);

	return;
}

static int record__synthesize_workload(struct record *rec, bool tail)
{
	int err;
	struct thread_map *thread_map;

	if (rec->opts.tail_synthesize != tail)
		return 0;

	thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
	if (thread_map == NULL)
		return -1;

	err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
						process_synthesized_event,
						&rec->session->machines.host,
						rec->opts.sample_address,
						rec->opts.proc_map_timeout);
	thread_map__put(thread_map);
	return err;
}

static int record__synthesize(struct record *rec, bool tail);

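/*
 * Finalize the current output file and switch to a new one named after the
 * current timestamp. Synthesized tracking events are emitted again so that
 * the new file can be decoded on its own.
 */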
static int
record__switch_output(struct record *rec, bool at_exit)
{
	struct perf_data *data = &rec->data;
	int fd, err;

	/* Same Size: "2015122520103046"*/
	char timestamp[] = "InvalidTimestamp";

	record__synthesize(rec, true);
	if (target__none(&rec->opts.target))
		record__synthesize_workload(rec, true);

	rec->samples = 0;
	record__finish_output(rec);
	err = fetch_current_timestamp(timestamp, sizeof(timestamp));
	if (err) {
		pr_err("Failed to get current timestamp\n");
		return -EINVAL;
	}

	fd = perf_data__switch(data, timestamp,
			       rec->session->header.data_offset,
			       at_exit);
	if (fd >= 0 && !at_exit) {
		rec->bytes_written = 0;
		rec->session->header.data_size = 0;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
			data->path, timestamp);

	/* Output tracking events */
	if (!at_exit) {
		record__synthesize(rec, false);

		/*
		 * In 'perf record --switch-output' without -a,
		 * record__synthesize() in record__switch_output() won't
		 * generate tracking events because there's no thread_map
		 * in evlist, so the newly created perf.data wouldn't
		 * contain map and comm information.
		 * Create a fake thread_map and directly call
		 * perf_event__synthesize_thread_map() for those events.
		 */
		if (target__none(&rec->opts.target))
			record__synthesize_workload(rec, false);
	}
	return fd;
}

static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}

static void snapshot_sig_handler(int sig);
static void alarm_sig_handler(int sig);

int __weak
perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
			    struct perf_tool *tool __maybe_unused,
			    perf_event__handler_t process __maybe_unused,
			    struct machine *machine __maybe_unused)
{
	return 0;
}

static const struct perf_event_mmap_page *
perf_evlist__pick_pc(struct perf_evlist *evlist)
{
	if (evlist) {
		if (evlist->mmap && evlist->mmap[0].base)
			return evlist->mmap[0].base;
		if (evlist->backward_mmap && evlist->backward_mmap[0].base)
			return evlist->backward_mmap[0].base;
	}
	return NULL;
}

static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
{
	const struct perf_event_mmap_page *pc;

	pc = perf_evlist__pick_pc(rec->evlist);
	if (pc)
		return pc;
	return NULL;
}

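/*
 * Emit the synthesized (non-sample) events a consumer needs to decode the
 * recording: header features/attrs/tracing data when piping, the time
 * conversion info, AUX trace info, kernel and module maps and the already
 * running threads.
 */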
static int record__synthesize(struct record *rec, bool tail)
{
	struct perf_session *session = rec->session;
	struct machine *machine = &session->machines.host;
	struct perf_data *data = &rec->data;
	struct record_opts *opts = &rec->opts;
	struct perf_tool *tool = &rec->tool;
	int fd = perf_data__fd(data);
	int err = 0;

	if (rec->opts.tail_synthesize != tail)
		return 0;

	if (data->is_pipe) {
		err = perf_event__synthesize_features(
			tool, session, rec->evlist, process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize features.\n");
			return err;
		}

		err = perf_event__synthesize_attrs(tool, session,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out;
		}

		if (have_tracepoints(&rec->evlist->entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so it's not really
			 * an error, just that we don't need to
			 * synthesize anything.  We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out;
			}
			rec->bytes_written += err;
		}
	}

	err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
					  process_synthesized_event, machine);
	if (err)
		goto out;

	if (rec->opts.full_auxtrace) {
		err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
					session, process_synthesized_event);
		if (err)
			goto out;
	}

	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
			   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
			   "Check /proc/kallsyms permission or run as root.\n");

	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
			   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
			   "Check /proc/modules permission or run as root.\n");

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
					    process_synthesized_event, opts->sample_address,
					    opts->proc_map_timeout, 1);
out:
	return err;
}

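/*
 * The main record loop: create the session, write the header, start the
 * workload (if any) and keep draining the ring buffers until we are done.
 */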
static int __cmd_record(struct record *rec, int argc, const char **argv)
{
	int err;
	int status = 0;
	unsigned long waking = 0;
	const bool forks = argc > 0;
	struct machine *machine;
	struct perf_tool *tool = &rec->tool;
	struct record_opts *opts = &rec->opts;
	struct perf_data *data = &rec->data;
	struct perf_session *session;
	bool disabled = false, draining = false;
	int fd;

	rec->progname = argv[0];

	atexit(record__sig_exit);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGTERM, sig_handler);
	signal(SIGSEGV, sigsegv_handler);

	if (rec->opts.record_namespaces)
		tool->namespace_events = true;

	if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
		signal(SIGUSR2, snapshot_sig_handler);
		if (rec->opts.auxtrace_snapshot_mode)
			trigger_on(&auxtrace_snapshot_trigger);
		if (rec->switch_output.enabled)
			trigger_on(&switch_output_trigger);
	} else {
		signal(SIGUSR2, SIG_IGN);
	}

	session = perf_session__new(data, false, tool);
	if (session == NULL) {
		pr_err("Perf session creation failed.\n");
		return -1;
	}

	fd = perf_data__fd(data);
	rec->session = session;

	record__init_features(rec);

	if (forks) {
		err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
						    argv, data->is_pipe,
						    workload_exec_failed_signal);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			status = err;
			goto out_delete_session;
		}
	}

	if (record__open(rec) != 0) {
		err = -1;
		goto out_child;
	}

	err = bpf__apply_obj_config();
	if (err) {
		char errbuf[BUFSIZ];

		bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
		pr_err("ERROR: Apply config to BPF failed: %s\n",
			 errbuf);
		goto out_child;
	}

	/*
	 * Normally perf_session__new would do this, but it doesn't have the
	 * evlist.
	 */
	if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
		pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
		rec->tool.ordered_events = false;
	}

	if (!rec->evlist->nr_groups)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

	if (data->is_pipe) {
		err = perf_header__write_pipe(fd);
		if (err < 0)
			goto out_child;
	} else {
		err = perf_session__write_header(session, rec->evlist, fd, false);
		if (err < 0)
			goto out_child;
	}

	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		err = -1;
		goto out_child;
	}

	machine = &session->machines.host;

	err = record__synthesize(rec, false);
	if (err < 0)
		goto out_child;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_child;
		}
	}

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!target__none(&opts->target) && !opts->initial_delay)
		perf_evlist__enable(rec->evlist);

	/*
	 * Let the child rip
	 */
	if (forks) {
		union perf_event *event;
		pid_t tgid;

		event = malloc(sizeof(event->comm) + machine->id_hdr_size);
		if (event == NULL) {
			err = -ENOMEM;
			goto out_child;
		}

		/*
		 * Some H/W events are generated before COMM event
		 * which is emitted during exec(), so perf script
		 * cannot see a correct process name for those events.
		 * Synthesize COMM event to prevent it.
		 */
		tgid = perf_event__synthesize_comm(tool, event,
						   rec->evlist->workload.pid,
						   process_synthesized_event,
						   machine);
		free(event);

		if (tgid == -1)
			goto out_child;

		event = malloc(sizeof(event->namespaces) +
			       (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
			       machine->id_hdr_size);
		if (event == NULL) {
			err = -ENOMEM;
			goto out_child;
		}

		/*
		 * Synthesize NAMESPACES event for the command specified.
		 */
		perf_event__synthesize_namespaces(tool, event,
						  rec->evlist->workload.pid,
						  tgid, process_synthesized_event,
						  machine);
		free(event);

		perf_evlist__start_workload(rec->evlist);
	}

	if (opts->initial_delay) {
		usleep(opts->initial_delay * USEC_PER_MSEC);
		perf_evlist__enable(rec->evlist);
	}

	trigger_ready(&auxtrace_snapshot_trigger);
	trigger_ready(&switch_output_trigger);
	perf_hooks__invoke_record_start();
	for (;;) {
		unsigned long long hits = rec->samples;

		/*
		 * rec->evlist->bkw_mmap_state may be BKW_MMAP_EMPTY here:
		 * when done == true and hits != rec->samples in the
		 * previous round.
		 *
		 * perf_evlist__toggle_bkw_mmap ensures we never convert
		 * BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
		 */
		if (trigger_is_hit(&switch_output_trigger) || done || draining)
			perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);

		if (record__mmap_read_all(rec) < 0) {
			trigger_error(&auxtrace_snapshot_trigger);
			trigger_error(&switch_output_trigger);
			err = -1;
			goto out_child;
		}

		if (auxtrace_record__snapshot_started) {
			auxtrace_record__snapshot_started = 0;
			if (!trigger_is_error(&auxtrace_snapshot_trigger))
				record__read_auxtrace_snapshot(rec);
			if (trigger_is_error(&auxtrace_snapshot_trigger)) {
				pr_err("AUX area tracing snapshot failed\n");
				err = -1;
				goto out_child;
			}
		}

		if (trigger_is_hit(&switch_output_trigger)) {
			/*
			 * If switch_output_trigger is hit, the data in the
			 * overwritable ring buffer should have been collected,
			 * so bkw_mmap_state should be set to BKW_MMAP_EMPTY.
			 *
			 * If SIGUSR2 was raised after or during
			 * record__mmap_read_all(), it didn't collect data from
			 * the overwritable ring buffer. Read again.
			 */
			if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
				continue;
			trigger_ready(&switch_output_trigger);

			/*
			 * Reenable events in the overwrite ring buffer after
			 * record__mmap_read_all(): we should have collected
			 * data from it.
			 */
			perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);

			if (!quiet)
				fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
					waking);
			waking = 0;
			fd = record__switch_output(rec, false);
			if (fd < 0) {
				pr_err("Failed to switch to new file\n");
				trigger_error(&switch_output_trigger);
				err = fd;
				goto out_child;
			}

			/* re-arm the alarm */
			if (rec->switch_output.time)
				alarm(rec->switch_output.time);
		}

		if (hits == rec->samples) {
			if (done || draining)
				break;
			err = perf_evlist__poll(rec->evlist, -1);
			/*
			 * Propagate error, only if there's any. Ignore positive
			 * number of returned events and interrupt error.
			 */
			if (err > 0 || (err < 0 && errno == EINTR))
				err = 0;
			waking++;

			if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
				draining = true;
		}

		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
		if (done && !disabled && !target__none(&opts->target)) {
			trigger_off(&auxtrace_snapshot_trigger);
			perf_evlist__disable(rec->evlist);
			disabled = true;
		}
	}
	trigger_off(&auxtrace_snapshot_trigger);
	trigger_off(&switch_output_trigger);

	if (forks && workload_exec_errno) {
		char msg[STRERR_BUFSIZE];
		const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
		pr_err("Workload failed: %s\n", emsg);
		err = -1;
		goto out_child;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

	if (target__none(&rec->opts.target))
		record__synthesize_workload(rec, true);

out_child:
	if (forks) {
		int exit_status;

		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&exit_status);

		if (err < 0)
			status = err;
		else if (WIFEXITED(exit_status))
			status = WEXITSTATUS(exit_status);
		else if (WIFSIGNALED(exit_status))
			signr = WTERMSIG(exit_status);
	} else
		status = err;

	record__synthesize(rec, true);
	/* this will be recalculated during process_buildids() */
	rec->samples = 0;

	if (!err) {
		if (!rec->timestamp_filename) {
			record__finish_output(rec);
		} else {
			fd = record__switch_output(rec, true);
			if (fd < 0) {
				status = fd;
				goto out_delete_session;
			}
		}
	}

	perf_hooks__invoke_record_end();

	if (!err && !quiet) {
		char samples[128];
		const char *postfix = rec->timestamp_filename ?
					".<timestamp>" : "";

		if (rec->samples && !rec->opts.full_auxtrace)
			scnprintf(samples, sizeof(samples),
				  " (%" PRIu64 " samples)", rec->samples);
		else
			samples[0] = '\0';

		fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s ]\n",
			perf_data__size(data) / 1024.0 / 1024.0,
			data->path, postfix, samples);
	}

out_delete_session:
	perf_session__delete(session);
	return status;
}

static void callchain_debug(struct callchain_param *callchain)
{
	static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };

	pr_debug("callchain: type %s\n", str[callchain->record_mode]);

	if (callchain->record_mode == CALLCHAIN_DWARF)
		pr_debug("callchain: stack dump size %d\n",
			 callchain->dump_size);
}

int record_opts__parse_callchain(struct record_opts *record,
				 struct callchain_param *callchain,
				 const char *arg, bool unset)
{
	int ret;
	callchain->enabled = !unset;

	/* --no-call-graph */
	if (unset) {
		callchain->record_mode = CALLCHAIN_NONE;
		pr_debug("callchain: disabled\n");
		return 0;
	}

	ret = parse_callchain_record_opt(arg, callchain);
	if (!ret) {
		/* Enable data address sampling for DWARF unwind. */
		if (callchain->record_mode == CALLCHAIN_DWARF)
			record->sample_address = true;
		callchain_debug(callchain);
	}

	return ret;
}

int record_parse_callchain_opt(const struct option *opt,
			       const char *arg,
			       int unset)
{
	return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
}

int record_callchain_opt(const struct option *opt,
			 const char *arg __maybe_unused,
			 int unset __maybe_unused)
{
	struct callchain_param *callchain = opt->value;

	callchain->enabled = true;

	if (callchain->record_mode == CALLCHAIN_NONE)
		callchain->record_mode = CALLCHAIN_FP;

	callchain_debug(callchain);
	return 0;
}

static int perf_record_config(const char *var, const char *value, void *cb)
{
	struct record *rec = cb;

	if (!strcmp(var, "record.build-id")) {
		if (!strcmp(value, "cache"))
			rec->no_buildid_cache = false;
		else if (!strcmp(value, "no-cache"))
			rec->no_buildid_cache = true;
		else if (!strcmp(value, "skip"))
			rec->no_buildid = true;
		else
			return -1;
		return 0;
	}
	if (!strcmp(var, "record.call-graph"))
		var = "call-graph.record-mode"; /* fall-through */

	return perf_default_config(var, value, cb);
}

struct clockid_map {
	const char *name;
	int clockid;
};

#define CLOCKID_MAP(n, c)	\
	{ .name = n, .clockid = (c), }

#define CLOCKID_END	{ .name = NULL, }


/*
 * Add the missing ones, we need to build on many distros...
 */
#ifndef CLOCK_MONOTONIC_RAW
#define CLOCK_MONOTONIC_RAW 4
#endif
#ifndef CLOCK_BOOTTIME
#define CLOCK_BOOTTIME 7
#endif
#ifndef CLOCK_TAI
#define CLOCK_TAI 11
#endif

static const struct clockid_map clockids[] = {
	/* available for all events, NMI safe */
	CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
	CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),

	/* available for some events */
	CLOCKID_MAP("realtime", CLOCK_REALTIME),
	CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
	CLOCKID_MAP("tai", CLOCK_TAI),

	/* available for the lazy */
	CLOCKID_MAP("mono", CLOCK_MONOTONIC),
	CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
	CLOCKID_MAP("real", CLOCK_REALTIME),
	CLOCKID_MAP("boot", CLOCK_BOOTTIME),

	CLOCKID_END,
};

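/*
 * Parse a user-supplied clockid, accepting either a clock name from the table
 * above (with an optional "CLOCK_" prefix) or a raw numeric id.
 */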
static int parse_clockid(const struct option *opt, const char *str, int unset)
{
	struct record_opts *opts = (struct record_opts *)opt->value;
	const struct clockid_map *cm;
	const char *ostr = str;

	if (unset) {
		opts->use_clockid = 0;
		return 0;
	}

	/* no arg passed */
	if (!str)
		return 0;

	/* no setting it twice */
	if (opts->use_clockid)
		return -1;

	opts->use_clockid = true;

	/* if it's a number, we're done */
	if (sscanf(str, "%d", &opts->clockid) == 1)
		return 0;

	/* allow a "CLOCK_" prefix to the name */
	if (!strncasecmp(str, "CLOCK_", 6))
		str += 6;

	for (cm = clockids; cm->name; cm++) {
		if (!strcasecmp(str, cm->name)) {
			opts->clockid = cm->clockid;
			return 0;
		}
	}

	opts->use_clockid = false;
	ui__warning("unknown clockid %s, check man page\n", ostr);
	return -1;
}

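/*
 * Illustrative -k/--clockid arguments accepted by parse_clockid() above
 * (examples only):
 *
 *   perf record -k mono ...                 # alias for CLOCK_MONOTONIC
 *   perf record -k CLOCK_MONOTONIC_RAW ...  # the "CLOCK_" prefix is stripped
 *   perf record -k 4 ...                    # a raw clockid number also works
 */
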
static int record__parse_mmap_pages(const struct option *opt,
				    const char *str,
				    int unset __maybe_unused)
{
	struct record_opts *opts = opt->value;
	char *s, *p;
	unsigned int mmap_pages;
	int ret;

	if (!str)
		return -EINVAL;

	s = strdup(str);
	if (!s)
		return -ENOMEM;

	p = strchr(s, ',');
	if (p)
		*p = '\0';

	if (*s) {
		ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
		if (ret)
			goto out_free;
		opts->mmap_pages = mmap_pages;
	}

	if (!p) {
		ret = 0;
		goto out_free;
	}

	ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
	if (ret)
		goto out_free;

	opts->auxtrace_mmap_pages = mmap_pages;

out_free:
	free(s);
	return ret;
}

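/*
 * Illustrative -m/--mmap-pages arguments parsed above (examples only):
 *
 *   -m 512        # 512 data pages, AUX area tracing mmap left at its default
 *   -m 512,128    # 512 data pages and 128 AUX area tracing pages
 *   -m 16M,1M     # sizes with a B/K/M/G suffix should also be accepted
 */
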
static void switch_output_size_warn(struct record *rec)
{
	u64 wakeup_size = perf_evlist__mmap_size(rec->opts.mmap_pages);
	struct switch_output *s = &rec->switch_output;

	wakeup_size /= 2;

	if (s->size < wakeup_size) {
		char buf[100];

		unit_number__scnprintf(buf, sizeof(buf), wakeup_size);
		pr_warning("WARNING: switch-output data size lower than "
			   "wakeup kernel buffer size (%s); "
			   "expect bigger perf.data sizes\n", buf);
	}
}

static int switch_output_setup(struct record *rec)
{
	struct switch_output *s = &rec->switch_output;
	static struct parse_tag tags_size[] = {
		{ .tag = 'B', .mult = 1       },
		{ .tag = 'K', .mult = 1 << 10 },
		{ .tag = 'M', .mult = 1 << 20 },
		{ .tag = 'G', .mult = 1 << 30 },
		{ .tag = 0 },
	};
	static struct parse_tag tags_time[] = {
		{ .tag = 's', .mult = 1        },
		{ .tag = 'm', .mult = 60       },
		{ .tag = 'h', .mult = 60*60    },
		{ .tag = 'd', .mult = 60*60*24 },
		{ .tag = 0 },
	};
	unsigned long val;

	if (!s->set)
		return 0;

	if (!strcmp(s->str, "signal")) {
		s->signal = true;
		pr_debug("switch-output with SIGUSR2 signal\n");
		goto enabled;
	}

	val = parse_tag_value(s->str, tags_size);
	if (val != (unsigned long) -1) {
		s->size = val;
		pr_debug("switch-output with %s size threshold\n", s->str);
		goto enabled;
	}

	val = parse_tag_value(s->str, tags_time);
	if (val != (unsigned long) -1) {
		s->time = val;
		pr_debug("switch-output with %s time threshold (%lu seconds)\n",
			 s->str, s->time);
		goto enabled;
	}

	return -1;

enabled:
	rec->timestamp_filename = true;
	s->enabled = true;

	if (s->size && !rec->opts.no_buffering)
		switch_output_size_warn(rec);

	return 0;
}

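/*
 * Illustrative --switch-output arguments handled by switch_output_setup()
 * above (examples only):
 *
 *   --switch-output            # same as "signal": rotate the file on SIGUSR2
 *   --switch-output=100M       # rotate after roughly 100 MB of data
 *   --switch-output=30s        # rotate every 30 seconds
 *
 * Each of these also turns on timestamped output file names.
 */
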
static const char * const __record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};
const char * const *record_usage = __record_usage;

/*
 * XXX Ideally this would be local to cmd_record() and passed to a record__new
 * because we need to have access to it in record__exit, which is called
 * after cmd_record() exits, but since record_options needs to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't touch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct record record = {
	.opts = {
		.sample_time         = true,
		.mmap_pages          = UINT_MAX,
		.user_freq           = UINT_MAX,
		.user_interval       = ULLONG_MAX,
		.freq                = 4000,
		.target              = {
			.uses_mmap       = true,
			.default_per_cpu = true,
		},
		.proc_map_timeout    = 500,
	},
	.tool = {
		.sample         = process_sample_event,
		.fork           = perf_event__process_fork,
		.exit           = perf_event__process_exit,
		.comm           = perf_event__process_comm,
		.namespaces     = perf_event__process_namespaces,
		.mmap           = perf_event__process_mmap,
		.mmap2          = perf_event__process_mmap2,
		.ordered_events = true,
	},
};

const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
	"\n\t\t\t\tDefault: fp";

static bool dry_run;

/*
 * XXX Will stay a global variable until we fix builtin-script.c to stop messing
 * with it and switch to use the library functions in perf_evlist that came
 * from builtin-record.c, i.e. use record_opts,
 * perf_evlist__prepare_workload, etc. instead of fork+exec'ing 'perf record',
 * using pipes, etc.
 */
static struct option __record_options[] = {
	OPT_CALLBACK('e', "event", &record.evlist, "event",
		     "event selector. Use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
		     "event filter", parse_filter),
	OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
			   NULL, "don't record events from perf itself",
			   exclude_perf),
	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
		   "record events on existing process id"),
	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
		   "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
		    "collect data without buffering"),
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
		   "list of cpus to monitor"),
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.data.path, "file",
		   "output file name"),
	OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
			&record.opts.no_inherit_set,
			"child tasks do not inherit counters"),
	OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
		    "synthesize non-sample events at the end of output"),
	OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
	OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
		     "number of mmap data pages and AUX area tracing mmap pages",
		     record__parse_mmap_pages),
	OPT_BOOLEAN(0, "group", &record.opts.group,
		    "put the counters into a counter group"),
	OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
			   NULL, "enables call-graph recording",
			   &record_callchain_opt),
	OPT_CALLBACK(0, "call-graph", &record.opts,
		     "record_mode[,record_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
	OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
		    "Record the sample physical addresses"),
	OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
	OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
			&record.opts.sample_time_set,
			"Record the sample timestamps"),
	OPT_BOOLEAN('P', "period", &record.opts.period, "Record the sample period"),
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
		    "don't sample"),
	OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
			&record.no_buildid_cache_set,
			"do not update the buildid cache"),
	OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
			&record.no_buildid_set,
			"do not collect buildids in perf.data"),
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
		     "ms to wait before starting measurement after program start"),
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
		   "user to profile"),

	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
			   "branch any", "sample any taken branches",
			   parse_branch_stack),

	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
		    "sample by weight (on special events only)"),
	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
		    "sample transaction flags (special events only)"),
	OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
		    "use per-thread mmaps"),
	OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
			    "sample selected machine registers on interrupt,"
			    " use -I ? to list register names", parse_regs),
	OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
			    "sample selected machine registers on interrupt,"
			    " use --user-regs=? to list register names", parse_regs),
	OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
		    "Record running/enabled time of read (:S) events"),
	OPT_CALLBACK('k', "clockid", &record.opts,
		     "clockid", "clockid to use for events, see clock_gettime()",
		     parse_clockid),
	OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
			  "opts", "AUX area tracing Snapshot Mode", ""),
	OPT_UINTEGER(0, "proc-map-timeout", &record.opts.proc_map_timeout,
		     "per thread proc mmap processing timeout in ms"),
	OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
		    "Record namespaces events"),
	OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
		    "Record context switch events"),
	OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
			 "Configure all used events to run in kernel space.",
			 PARSE_OPT_EXCLUSIVE),
	OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
			 "Configure all used events to run in user space.",
			 PARSE_OPT_EXCLUSIVE),
	OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
		   "clang binary to use for compiling BPF scriptlets"),
	OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
		   "options passed to clang when compiling BPF scriptlets"),
	OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
		    "Record build-id of all DSOs regardless of hits"),
	OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
		    "append timestamp to output filename"),
	OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
			      &record.switch_output.set, "signal,size,time",
			      "Switch output when receiving SIGUSR2 or when crossing a size or time threshold",
			      "signal"),
	OPT_BOOLEAN(0, "dry-run", &dry_run,
		    "Parse options then exit"),
	OPT_END()
};

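/*
 * A few illustrative invocations combining the options above (examples
 * only, not an exhaustive list):
 *
 *   perf record -F 4000 -a -g -- sleep 10
 *   perf record -e cycles:u -p 1234 --switch-output=1G
 *   perf record -o my.data --call-graph dwarf -- ./workload
 */
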
struct option *record_options = __record_options;

int cmd_record(int argc, const char **argv)
{
	int err;
	struct record *rec = &record;
	char errbuf[BUFSIZ];

#ifndef HAVE_LIBBPF_SUPPORT
# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
	set_nobuild('\0', "clang-path", true);
	set_nobuild('\0', "clang-opt", true);
# undef set_nobuild
#endif

#ifndef HAVE_BPF_PROLOGUE
# if !defined (HAVE_DWARF_SUPPORT)
#  define REASON  "NO_DWARF=1"
# elif !defined (HAVE_LIBBPF_SUPPORT)
#  define REASON  "NO_LIBBPF=1"
# else
#  define REASON  "this architecture doesn't support BPF prologue"
# endif
# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
	set_nobuild('\0', "vmlinux", true);
# undef set_nobuild
# undef REASON
#endif

	rec->evlist = perf_evlist__new();
	if (rec->evlist == NULL)
		return -ENOMEM;

	err = perf_config(perf_record_config, rec);
	if (err)
		return err;

	argc = parse_options(argc, argv, record_options, record_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (quiet)
		perf_quiet_option();

	/* Make system wide (-a) the default target. */
	if (!argc && target__none(&rec->opts.target))
		rec->opts.target.system_wide = true;

	if (nr_cgroups && !rec->opts.target.system_wide) {
		usage_with_options_msg(record_usage, record_options,
			"cgroup monitoring only available in system-wide mode");
	}
	if (rec->opts.record_switch_events &&
	    !perf_can_record_switch_events()) {
		ui__error("kernel does not support recording context switch events\n");
		parse_options_usage(record_usage, record_options, "switch-events", 0);
		return -EINVAL;
	}

	if (switch_output_setup(rec)) {
		parse_options_usage(record_usage, record_options, "switch-output", 0);
		return -EINVAL;
	}

	if (rec->switch_output.time) {
		signal(SIGALRM, alarm_sig_handler);
		alarm(rec->switch_output.time);
	}

	if (!rec->itr) {
		rec->itr = auxtrace_record__init(rec->evlist, &err);
		if (err)
			goto out;
	}

	err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
					      rec->opts.auxtrace_snapshot_opts);
	if (err)
		goto out;

	/*
	 * Allow aliases to facilitate the lookup of symbols for address
	 * filters. Refer to auxtrace_parse_filters().
	 */
	symbol_conf.allow_aliases = true;

	symbol__init(NULL);

	err = auxtrace_parse_filters(rec->evlist);
	if (err)
		goto out;

	if (dry_run)
		goto out;

	err = bpf__setup_stdout(rec->evlist);
	if (err) {
		bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
		pr_err("ERROR: Setup BPF stdout failed: %s\n",
		       errbuf);
		goto out;
	}

	err = -ENOMEM;

	if (symbol_conf.kptr_restrict)
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");

	if (rec->no_buildid_cache || rec->no_buildid) {
		disable_buildid_cache();
	} else if (rec->switch_output.enabled) {
		/*
		 * In 'perf record --switch-output', disable buildid
		 * generation by default to reduce data file switching
		 * overhead. Still generate buildids if they are explicitly
		 * required, using
		 *
		 *  perf record --switch-output --no-no-buildid \
		 *              --no-no-buildid-cache
		 *
		 * The following code is equivalent to:
		 *
		 *  if ((rec->no_buildid || !rec->no_buildid_set) &&
		 *      (rec->no_buildid_cache || !rec->no_buildid_cache_set))
		 *         disable_buildid_cache();
		 */
		bool disable = true;

		if (rec->no_buildid_set && !rec->no_buildid)
			disable = false;
		if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
			disable = false;
		if (disable) {
			rec->no_buildid = true;
			rec->no_buildid_cache = true;
			disable_buildid_cache();
		}
	}

	if (record.opts.overwrite)
		record.opts.tail_synthesize = true;

	if (rec->evlist->nr_entries == 0 &&
	    __perf_evlist__add_default(rec->evlist, !record.opts.no_samples) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out;
	}

	if (rec->opts.target.tid && !rec->opts.no_inherit_set)
		rec->opts.no_inherit = true;

	err = target__validate(&rec->opts.target);
	if (err) {
		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s", errbuf);
	}

	err = target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;

		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out;
	}

	/* Enable ignoring missing threads when the -u option is given. */
	rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX;

	err = -ENOMEM;
	if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
	if (err)
		goto out;

	/*
	 * We take all buildids when the file contains AUX area tracing data,
	 * because we do not decode the trace; that would take too long.
	 */
	if (rec->opts.full_auxtrace)
		rec->buildid_all = true;

	if (record_opts__config(&rec->opts)) {
		err = -EINVAL;
		goto out;
	}

	err = __cmd_record(&record, argc, argv);
out:
	perf_evlist__delete(rec->evlist);
	symbol__exit();
	auxtrace_record__free(rec->itr);
	return err;
}

static void snapshot_sig_handler(int sig __maybe_unused)
{
	struct record *rec = &record;

	if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
		trigger_hit(&auxtrace_snapshot_trigger);
		auxtrace_record__snapshot_started = 1;
		if (auxtrace_record__snapshot_start(record.itr))
			trigger_error(&auxtrace_snapshot_trigger);
	}

	if (switch_output_signal(rec))
		trigger_hit(&switch_output_trigger);
}

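/*
 * Both checks in the handler above are driven by SIGUSR2: with AUX area
 * snapshot mode (-S) it requests a snapshot, and with --switch-output[=signal]
 * it rotates the output file. An illustrative way to trigger it from a
 * shell (example only):
 *
 *   kill -USR2 $(pidof perf)
 */
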
static void alarm_sig_handler(int sig __maybe_unused)
{
	struct record *rec = &record;

	if (switch_output_time(rec))
		trigger_hit(&switch_output_trigger);
}