// SPDX-License-Identifier: GPL-2.0
/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "perf.h"

#include "util/build-id.h"
#include "util/util.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/config.h"

#include "util/callchain.h"
#include "util/cgroup.h"
#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/drv_configs.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"
#include "util/perf_regs.h"
#include "util/auxtrace.h"
#include "util/tsc.h"
#include "util/parse-branch-options.h"
#include "util/parse-regs-options.h"
#include "util/llvm-utils.h"
#include "util/bpf-loader.h"
#include "util/trigger.h"
#include "util/perf-hooks.h"
#include "util/time-utils.h"
#include "util/units.h"
#include "asm/bug.h"

#include <errno.h>
#include <inttypes.h>
#include <locale.h>
#include <poll.h>
#include <unistd.h>
#include <sched.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <linux/time64.h>

struct switch_output {
	bool		 enabled;
	bool		 signal;
	unsigned long	 size;
	unsigned long	 time;
	const char	*str;
	bool		 set;
};

struct record {
	struct perf_tool	tool;
	struct record_opts	opts;
	u64			bytes_written;
	struct perf_data	data;
	struct auxtrace_record	*itr;
	struct perf_evlist	*evlist;
	struct perf_session	*session;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_set;
	bool			no_buildid_cache;
	bool			no_buildid_cache_set;
	bool			buildid_all;
	bool			timestamp_filename;
	bool			timestamp_boundary;
	struct switch_output	switch_output;
	unsigned long long	samples;
};

static volatile int auxtrace_record__snapshot_started;
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
static DEFINE_TRIGGER(switch_output_trigger);

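/*
 * Helpers that decide whether the --switch-output trigger should fire:
 * on SIGUSR2 (signal), once enough bytes have been written (size), or
 * periodically (time). All of them require the trigger to be ready.
 */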
static bool switch_output_signal(struct record *rec)
{
	return rec->switch_output.signal &&
	       trigger_is_ready(&switch_output_trigger);
}

static bool switch_output_size(struct record *rec)
{
	return rec->switch_output.size &&
	       trigger_is_ready(&switch_output_trigger) &&
	       (rec->bytes_written >= rec->switch_output.size);
}

static bool switch_output_time(struct record *rec)
{
	return rec->switch_output.time &&
	       trigger_is_ready(&switch_output_trigger);
}

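/*
 * Append a block of bytes to the perf.data output and account for it,
 * hitting the switch-output trigger once the configured size limit is
 * reached.
 */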
static int record__write(struct record *rec, struct perf_mmap *map __maybe_unused,
			 void *bf, size_t size)
{
	struct perf_data_file *file = &rec->session->data->file;

	if (perf_data_file__write(file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	rec->bytes_written += size;

	if (switch_output_size(rec))
		trigger_hit(&switch_output_trigger);

	return 0;
}

static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, NULL, event, event->header.size);
}

static int record__pushfn(struct perf_mmap *map, void *to, void *bf, size_t size)
{
	struct record *rec = to;

	rec->samples++;
	return record__write(rec, map, bf, size);
}

static volatile int done;
static volatile int signr = -1;
static volatile int child_finished;

static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
}

static void sigsegv_handler(int sig)
{
	perf_hooks__recover();
	sighandler_dump_stack(sig);
}

static void record__sig_exit(void)
{
	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	raise(signr);
}

#ifdef HAVE_AUXTRACE_SUPPORT

static int record__process_auxtrace(struct perf_tool *tool,
				    struct perf_mmap *map,
				    union perf_event *event, void *data1,
				    size_t len1, void *data2, size_t len2)
{
	struct record *rec = container_of(tool, struct record, tool);
	struct perf_data *data = &rec->data;
	size_t padding;
	u8 pad[8] = {0};

	if (!perf_data__is_pipe(data)) {
		off_t file_offset;
		int fd = perf_data__fd(data);
		int err;

		file_offset = lseek(fd, 0, SEEK_CUR);
		if (file_offset == -1)
			return -1;
		err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
						     event, file_offset);
		if (err)
			return err;
	}

	/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
	padding = (len1 + len2) & 7;
	if (padding)
		padding = 8 - padding;

	record__write(rec, map, event, event->header.size);
	record__write(rec, map, data1, len1);
	if (len2)
		record__write(rec, map, data2, len2);
	record__write(rec, map, &pad, padding);

	return 0;
}

static int record__auxtrace_mmap_read(struct record *rec,
				      struct perf_mmap *map)
{
	int ret;

	ret = auxtrace_mmap__read(map, rec->itr, &rec->tool,
				  record__process_auxtrace);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_mmap_read_snapshot(struct record *rec,
					       struct perf_mmap *map)
{
	int ret;

	ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool,
					   record__process_auxtrace,
					   rec->opts.auxtrace_snapshot_size);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_read_snapshot_all(struct record *rec)
{
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		struct perf_mmap *map = &rec->evlist->mmap[i];

		if (!map->auxtrace_mmap.base)
			continue;

		if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) {
			rc = -1;
			goto out;
		}
	}
out:
	return rc;
}

static void record__read_auxtrace_snapshot(struct record *rec)
{
	pr_debug("Recording AUX area tracing snapshot\n");
	if (record__auxtrace_read_snapshot_all(rec) < 0) {
		trigger_error(&auxtrace_snapshot_trigger);
	} else {
		if (auxtrace_record__snapshot_finish(rec->itr))
			trigger_error(&auxtrace_snapshot_trigger);
		else
			trigger_ready(&auxtrace_snapshot_trigger);
	}
}

static int record__auxtrace_init(struct record *rec)
{
	int err;

	if (!rec->itr) {
		rec->itr = auxtrace_record__init(rec->evlist, &err);
		if (err)
			return err;
	}

	err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
					      rec->opts.auxtrace_snapshot_opts);
	if (err)
		return err;

	return auxtrace_parse_filters(rec->evlist);
}

#else

static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
			       struct perf_mmap *map __maybe_unused)
{
	return 0;
}

static inline
void record__read_auxtrace_snapshot(struct record *rec __maybe_unused)
{
}

static inline
int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
{
	return 0;
}

static int record__auxtrace_init(struct record *rec __maybe_unused)
{
	return 0;
}

#endif

static int record__mmap_evlist(struct record *rec,
			       struct perf_evlist *evlist)
{
	struct record_opts *opts = &rec->opts;
	char msg[512];

	if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
				 opts->auxtrace_mmap_pages,
				 opts->auxtrace_snapshot_mode) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u,%u)\n",
			       opts->mmap_pages, opts->auxtrace_mmap_pages);
			return -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno,
				str_error_r(errno, msg, sizeof(msg)));
			if (errno)
				return -errno;
			else
				return -EINVAL;
		}
	}
	return 0;
}

static int record__mmap(struct record *rec)
{
	return record__mmap_evlist(rec, rec->evlist);
}

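/*
 * Open all events in the evlist (falling back to weaker configurations
 * where needed), apply event filters and driver configs, and mmap the
 * ring buffers.
 */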
static int record__open(struct record *rec)
{
	char msg[BUFSIZ];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	struct perf_evsel_config_term *err_term;
	int rc = 0;

	/*
	 * For initial_delay we need to add a dummy event so that we can track
	 * PERF_RECORD_MMAP while we wait for the initial delay to enable the
	 * real events, the ones asked by the user.
	 */
	if (opts->initial_delay) {
		if (perf_evlist__add_dummy(evlist))
			return -ENOMEM;

		pos = perf_evlist__first(evlist);
		pos->tracking = 0;
		pos = perf_evlist__last(evlist);
		pos->tracking = 1;
		pos->attr.enable_on_exec = 1;
	}

	perf_evlist__config(evlist, opts, &callchain_param);

	evlist__for_each_entry(evlist, pos) {
try_again:
		if (perf_evsel__open(pos, pos->cpus, pos->threads) < 0) {
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose > 0)
					ui__warning("%s\n", msg);
				goto try_again;
			}
			if ((errno == EINVAL || errno == EBADF) &&
			    pos->leader != pos &&
			    pos->weak_group) {
				pos = perf_evlist__reset_weak_group(evlist, pos);
				goto try_again;
			}
			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}

		pos->supported = true;
	}

	if (perf_evlist__apply_filters(evlist, &pos)) {
		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
			pos->filter, perf_evsel__name(pos), errno,
			str_error_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	if (perf_evlist__apply_drv_configs(evlist, &pos, &err_term)) {
		pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
			err_term->val.drv_cfg, perf_evsel__name(pos), errno,
			str_error_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	rc = record__mmap(rec);
	if (rc)
		goto out;

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	struct record *rec = container_of(tool, struct record, tool);

	if (rec->evlist->first_sample_time == 0)
		rec->evlist->first_sample_time = sample->time;

	rec->evlist->last_sample_time = sample->time;

	if (rec->buildid_all)
		return 0;

	rec->samples++;
	return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
}

static int process_buildids(struct record *rec)
{
	struct perf_data *data = &rec->data;
	struct perf_session *session = rec->session;

	if (data->size == 0)
		return 0;

	/*
	 * During this process, it'll load the kernel map and replace
	 * dso->long_name with the real pathname it found.  In this case
	 * we prefer a vmlinux path like
	 *   /lib/modules/3.16.4/build/vmlinux
	 *
	 * rather than the build-id path (in the debug directory):
	 *   $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
	 */
	symbol_conf.ignore_vmlinux_buildid = true;

	/*
	 * If --buildid-all is given, it marks all DSOs regardless of hits,
	 * so there is no need to process samples. But if timestamp_boundary
	 * is enabled, it still needs to walk all samples to get the
	 * timestamps of the first/last samples.
	 */
	if (rec->buildid_all && !rec->timestamp_boundary)
		rec->tool.sample = NULL;

	return perf_session__process_events(session);
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * For the guest kernel, when processing the record & report
	 * subcommands, we arrange the module mmaps prior to the guest
	 * kernel mmap and trigger a preload of the dso, because default
	 * guest module symbols are loaded from guest kallsyms instead
	 * of /lib/modules/XXX/XXX.  This avoids missing symbols when
	 * the first address is in a module instead of in the guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for the guest kernel because the guest kernel's
	 * /proc/kallsyms sometimes has no _text.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

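/*
 * Drain every mmapped ring buffer of the evlist (regular or overwritable)
 * into the output file, then mark the round as finished if at least one
 * event was written.
 */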
static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
				    bool overwrite)
{
	u64 bytes_written = rec->bytes_written;
	int i;
	int rc = 0;
	struct perf_mmap *maps;

	if (!evlist)
		return 0;

	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
	if (!maps)
		return 0;

	if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
		return 0;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		struct perf_mmap *map = &maps[i];

		if (map->base) {
			if (perf_mmap__push(map, rec, record__pushfn) != 0) {
				rc = -1;
				goto out;
			}
		}

		if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
		    record__auxtrace_mmap_read(rec, map) != 0) {
			rc = -1;
			goto out;
		}
	}

	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, NULL, &finished_round_event, sizeof(finished_round_event));

	if (overwrite)
		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
out:
	return rc;
}

static int record__mmap_read_all(struct record *rec)
{
	int err;

	err = record__mmap_read_evlist(rec, rec->evlist, false);
	if (err)
		return err;

	return record__mmap_read_evlist(rec, rec->evlist, true);
}

static void record__init_features(struct record *rec)
{
	struct perf_session *session = rec->session;
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&rec->evlist->entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);

	if (!rec->opts.full_auxtrace)
		perf_header__clear_feat(&session->header, HEADER_AUXTRACE);

	if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
		perf_header__clear_feat(&session->header, HEADER_CLOCKID);

	perf_header__clear_feat(&session->header, HEADER_STAT);
}

static void
record__finish_output(struct record *rec)
{
	struct perf_data *data = &rec->data;
	int fd = perf_data__fd(data);

	if (data->is_pipe)
		return;

	rec->session->header.data_size += rec->bytes_written;
	data->size = lseek(perf_data__fd(data), 0, SEEK_CUR);

	if (!rec->no_buildid) {
		process_buildids(rec);

		if (rec->buildid_all)
			dsos__hit_all(rec->session);
	}
	perf_session__write_header(rec->session, rec->evlist, fd, true);

	return;
}

static int record__synthesize_workload(struct record *rec, bool tail)
{
	int err;
	struct thread_map *thread_map;

	if (rec->opts.tail_synthesize != tail)
		return 0;

	thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
	if (thread_map == NULL)
		return -1;

	err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
						process_synthesized_event,
						&rec->session->machines.host,
						rec->opts.sample_address,
						rec->opts.proc_map_timeout);
	thread_map__put(thread_map);
	return err;
}

static int record__synthesize(struct record *rec, bool tail);

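/*
 * Rotate the output for --switch-output: finish the current perf.data,
 * switch to a timestamped file and, unless called at exit, re-emit the
 * tracking events the new file needs to be usable on its own.
 */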
static int
record__switch_output(struct record *rec, bool at_exit)
{
	struct perf_data *data = &rec->data;
	int fd, err;

	/* Same Size: "2015122520103046" */
	char timestamp[] = "InvalidTimestamp";

	record__synthesize(rec, true);
	if (target__none(&rec->opts.target))
		record__synthesize_workload(rec, true);

	rec->samples = 0;
	record__finish_output(rec);
	err = fetch_current_timestamp(timestamp, sizeof(timestamp));
	if (err) {
		pr_err("Failed to get current timestamp\n");
		return -EINVAL;
	}

	fd = perf_data__switch(data, timestamp,
			       rec->session->header.data_offset,
			       at_exit);
	if (fd >= 0 && !at_exit) {
		rec->bytes_written = 0;
		rec->session->header.data_size = 0;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
			data->file.path, timestamp);

	/* Output tracking events */
	if (!at_exit) {
		record__synthesize(rec, false);

		/*
		 * In 'perf record --switch-output' without -a,
		 * record__synthesize() in record__switch_output() won't
		 * generate tracking events because there's no thread_map
		 * in the evlist, so the newly created perf.data would not
		 * contain map and comm information.
		 * Create a fake thread_map and directly call
		 * perf_event__synthesize_thread_map() for those events.
		 */
		if (target__none(&rec->opts.target))
			record__synthesize_workload(rec, false);
	}
	return fd;
}

static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}

static void snapshot_sig_handler(int sig);
static void alarm_sig_handler(int sig);

int __weak
perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
			    struct perf_tool *tool __maybe_unused,
			    perf_event__handler_t process __maybe_unused,
			    struct machine *machine __maybe_unused)
{
	return 0;
}

static const struct perf_event_mmap_page *
perf_evlist__pick_pc(struct perf_evlist *evlist)
{
	if (evlist) {
		if (evlist->mmap && evlist->mmap[0].base)
			return evlist->mmap[0].base;
		if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].base)
			return evlist->overwrite_mmap[0].base;
	}
	return NULL;
}

static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
{
	const struct perf_event_mmap_page *pc;

	pc = perf_evlist__pick_pc(rec->evlist);
	if (pc)
		return pc;
	return NULL;
}

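/*
 * Synthesize the side-band/meta events (attrs, features, tracing data,
 * time conversion, auxtrace info, kernel and module mmaps, thread and
 * cpu maps) that consumers of the perf.data file need in order to
 * resolve the samples themselves.
 */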
static int record__synthesize(struct record *rec, bool tail)
{
	struct perf_session *session = rec->session;
	struct machine *machine = &session->machines.host;
	struct perf_data *data = &rec->data;
	struct record_opts *opts = &rec->opts;
	struct perf_tool *tool = &rec->tool;
	int fd = perf_data__fd(data);
	int err = 0;

	if (rec->opts.tail_synthesize != tail)
		return 0;

	if (data->is_pipe) {
		/*
		 * We need to synthesize events first, because some
		 * features work on top of them (on report side).
		 */
		err = perf_event__synthesize_attrs(tool, rec->evlist,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out;
		}

		err = perf_event__synthesize_features(tool, session, rec->evlist,
						      process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize features.\n");
			return err;
		}

		if (have_tracepoints(&rec->evlist->entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so it's not really
			 * an error, just that we don't need to
			 * synthesize anything.  We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out;
			}
			rec->bytes_written += err;
		}
	}

	err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
					  process_synthesized_event, machine);
	if (err)
		goto out;

	if (rec->opts.full_auxtrace) {
		err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
					session, process_synthesized_event);
		if (err)
			goto out;
	}

	if (!perf_evlist__exclude_kernel(rec->evlist)) {
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine);
		WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
			   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
			   "Check /proc/kallsyms permission or run as root.\n");

		err = perf_event__synthesize_modules(tool, process_synthesized_event,
						     machine);
		WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
			   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
			   "Check /proc/modules permission or run as root.\n");
	}

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	err = perf_event__synthesize_extra_attr(&rec->tool,
						rec->evlist,
						process_synthesized_event,
						data->is_pipe);
	if (err)
		goto out;

	err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->threads,
						 process_synthesized_event,
						 NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize thread map.\n");
		return err;
	}

	err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->cpus,
					     process_synthesized_event, NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize cpu map.\n");
		return err;
	}

	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
					    process_synthesized_event, opts->sample_address,
					    opts->proc_map_timeout, 1);
out:
	return err;
}

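/*
 * The main record loop: set up signal handling, create the session and
 * output file, optionally fork the workload, open and mmap the events,
 * synthesize the initial meta events and then keep draining the ring
 * buffers until the workload exits or recording is interrupted.
 */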
static int __cmd_record(struct record *rec, int argc, const char **argv)
{
	int err;
	int status = 0;
	unsigned long waking = 0;
	const bool forks = argc > 0;
	struct perf_tool *tool = &rec->tool;
	struct record_opts *opts = &rec->opts;
	struct perf_data *data = &rec->data;
	struct perf_session *session;
	bool disabled = false, draining = false;
	int fd;

	atexit(record__sig_exit);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGTERM, sig_handler);
	signal(SIGSEGV, sigsegv_handler);

	if (rec->opts.record_namespaces)
		tool->namespace_events = true;

	if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
		signal(SIGUSR2, snapshot_sig_handler);
		if (rec->opts.auxtrace_snapshot_mode)
			trigger_on(&auxtrace_snapshot_trigger);
		if (rec->switch_output.enabled)
			trigger_on(&switch_output_trigger);
	} else {
		signal(SIGUSR2, SIG_IGN);
	}

	session = perf_session__new(data, false, tool);
	if (session == NULL) {
		pr_err("Perf session creation failed.\n");
		return -1;
	}

	fd = perf_data__fd(data);
	rec->session = session;

	record__init_features(rec);

	if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
		session->header.env.clockid_res_ns = rec->opts.clockid_res_ns;

	if (forks) {
		err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
						    argv, data->is_pipe,
						    workload_exec_failed_signal);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			status = err;
			goto out_delete_session;
		}
	}

	/*
	 * If we have just a single event and are sending data
	 * through a pipe, we need to force the ids allocation,
	 * because we synthesize the event name through the pipe
	 * and need the id for that.
	 */
	if (data->is_pipe && rec->evlist->nr_entries == 1)
		rec->opts.sample_id = true;

	if (record__open(rec) != 0) {
		err = -1;
		goto out_child;
	}

	err = bpf__apply_obj_config();
	if (err) {
		char errbuf[BUFSIZ];

		bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
		pr_err("ERROR: Apply config to BPF failed: %s\n",
			errbuf);
		goto out_child;
	}

	/*
	 * Normally perf_session__new would do this, but it doesn't have the
	 * evlist.
	 */
	if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
		pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
		rec->tool.ordered_events = false;
	}

	if (!rec->evlist->nr_groups)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

	if (data->is_pipe) {
		err = perf_header__write_pipe(fd);
		if (err < 0)
			goto out_child;
	} else {
		err = perf_session__write_header(session, rec->evlist, fd, false);
		if (err < 0)
			goto out_child;
	}

	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		err = -1;
		goto out_child;
	}

	err = record__synthesize(rec, false);
	if (err < 0)
		goto out_child;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_child;
		}
	}

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!target__none(&opts->target) && !opts->initial_delay)
		perf_evlist__enable(rec->evlist);

	/*
	 * Let the child rip
	 */
	if (forks) {
		struct machine *machine = &session->machines.host;
		union perf_event *event;
		pid_t tgid;

		event = malloc(sizeof(event->comm) + machine->id_hdr_size);
		if (event == NULL) {
			err = -ENOMEM;
			goto out_child;
		}

		/*
		 * Some H/W events are generated before the COMM event,
		 * which is emitted during exec(), so perf script
		 * cannot see a correct process name for those events.
		 * Synthesize a COMM event to prevent it.
		 */
		tgid = perf_event__synthesize_comm(tool, event,
						   rec->evlist->workload.pid,
						   process_synthesized_event,
						   machine);
		free(event);

		if (tgid == -1)
			goto out_child;

		event = malloc(sizeof(event->namespaces) +
			       (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
			       machine->id_hdr_size);
		if (event == NULL) {
			err = -ENOMEM;
			goto out_child;
		}

		/*
		 * Synthesize NAMESPACES event for the command specified.
		 */
		perf_event__synthesize_namespaces(tool, event,
						  rec->evlist->workload.pid,
						  tgid, process_synthesized_event,
						  machine);
		free(event);

		perf_evlist__start_workload(rec->evlist);
	}

	if (opts->initial_delay) {
		usleep(opts->initial_delay * USEC_PER_MSEC);
		perf_evlist__enable(rec->evlist);
	}

	trigger_ready(&auxtrace_snapshot_trigger);
	trigger_ready(&switch_output_trigger);
	perf_hooks__invoke_record_start();
	for (;;) {
		unsigned long long hits = rec->samples;

		/*
		 * rec->evlist->bkw_mmap_state may be BKW_MMAP_EMPTY
		 * here: when done == true and hits != rec->samples in
		 * the previous round.
		 *
		 * perf_evlist__toggle_bkw_mmap ensures we never
		 * convert BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
		 */
		if (trigger_is_hit(&switch_output_trigger) || done || draining)
			perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);

		if (record__mmap_read_all(rec) < 0) {
			trigger_error(&auxtrace_snapshot_trigger);
			trigger_error(&switch_output_trigger);
			err = -1;
			goto out_child;
		}

		if (auxtrace_record__snapshot_started) {
			auxtrace_record__snapshot_started = 0;
			if (!trigger_is_error(&auxtrace_snapshot_trigger))
				record__read_auxtrace_snapshot(rec);
			if (trigger_is_error(&auxtrace_snapshot_trigger)) {
				pr_err("AUX area tracing snapshot failed\n");
				err = -1;
				goto out_child;
			}
		}

		if (trigger_is_hit(&switch_output_trigger)) {
			/*
			 * If switch_output_trigger is hit, the data in the
			 * overwritable ring buffer should have been collected,
			 * so bkw_mmap_state should be set to BKW_MMAP_EMPTY.
			 *
			 * If SIGUSR2 was raised after or during
			 * record__mmap_read_all(), it didn't collect data from
			 * the overwritable ring buffer. Read again.
			 */
			if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
				continue;
			trigger_ready(&switch_output_trigger);

			/*
			 * Reenable events in the overwrite ring buffer after
			 * record__mmap_read_all(): we should have collected
			 * data from it.
			 */
			perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);

			if (!quiet)
				fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
					waking);
			waking = 0;
			fd = record__switch_output(rec, false);
			if (fd < 0) {
				pr_err("Failed to switch to new file\n");
				trigger_error(&switch_output_trigger);
				err = fd;
				goto out_child;
			}

			/* re-arm the alarm */
			if (rec->switch_output.time)
				alarm(rec->switch_output.time);
		}

		if (hits == rec->samples) {
			if (done || draining)
				break;
			err = perf_evlist__poll(rec->evlist, -1);
			/*
			 * Propagate error, only if there's any. Ignore positive
			 * number of returned events and interrupt error.
			 */
			if (err > 0 || (err < 0 && errno == EINTR))
				err = 0;
			waking++;

			if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
				draining = true;
		}

		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
		if (done && !disabled && !target__none(&opts->target)) {
			trigger_off(&auxtrace_snapshot_trigger);
			perf_evlist__disable(rec->evlist);
			disabled = true;
		}
	}
	trigger_off(&auxtrace_snapshot_trigger);
	trigger_off(&switch_output_trigger);

	if (forks && workload_exec_errno) {
		char msg[STRERR_BUFSIZE];
		const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
		pr_err("Workload failed: %s\n", emsg);
		err = -1;
		goto out_child;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

	if (target__none(&rec->opts.target))
		record__synthesize_workload(rec, true);

out_child:
	if (forks) {
		int exit_status;

		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&exit_status);

		if (err < 0)
			status = err;
		else if (WIFEXITED(exit_status))
			status = WEXITSTATUS(exit_status);
		else if (WIFSIGNALED(exit_status))
			signr = WTERMSIG(exit_status);
	} else
		status = err;

	record__synthesize(rec, true);
	/* this will be recalculated during process_buildids() */
	rec->samples = 0;

	if (!err) {
		if (!rec->timestamp_filename) {
			record__finish_output(rec);
		} else {
			fd = record__switch_output(rec, true);
			if (fd < 0) {
				status = fd;
				goto out_delete_session;
			}
		}
	}

	perf_hooks__invoke_record_end();

	if (!err && !quiet) {
		char samples[128];
		const char *postfix = rec->timestamp_filename ?
					".<timestamp>" : "";

		if (rec->samples && !rec->opts.full_auxtrace)
			scnprintf(samples, sizeof(samples),
				  " (%" PRIu64 " samples)", rec->samples);
		else
			samples[0] = '\0';

		fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s ]\n",
			perf_data__size(data) / 1024.0 / 1024.0,
			data->file.path, postfix, samples);
	}

out_delete_session:
	perf_session__delete(session);
	return status;
}
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001226
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001227static void callchain_debug(struct callchain_param *callchain)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001228{
Kan Liangaad2b212015-01-05 13:23:04 -05001229 static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
Jiri Olsaa601fdf2014-02-03 12:44:43 +01001230
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001231 pr_debug("callchain: type %s\n", str[callchain->record_mode]);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001232
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001233 if (callchain->record_mode == CALLCHAIN_DWARF)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001234 pr_debug("callchain: stack dump size %d\n",
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001235 callchain->dump_size);
1236}
1237
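/*
 * Parse the --call-graph argument ("record_mode[,record_size]") into
 * *callchain; DWARF mode also turns on data address sampling. An
 * illustrative invocation: "perf record --call-graph dwarf,8192 -- ./workload"
 * selects DWARF unwinding with an 8kB user stack dump per sample.
 */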
1238int record_opts__parse_callchain(struct record_opts *record,
1239 struct callchain_param *callchain,
1240 const char *arg, bool unset)
1241{
1242 int ret;
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001243 callchain->enabled = !unset;
1244
1245 /* --no-call-graph */
1246 if (unset) {
1247 callchain->record_mode = CALLCHAIN_NONE;
1248 pr_debug("callchain: disabled\n");
1249 return 0;
1250 }
1251
1252 ret = parse_callchain_record_opt(arg, callchain);
1253 if (!ret) {
1254 /* Enable data address sampling for DWARF unwind. */
1255 if (callchain->record_mode == CALLCHAIN_DWARF)
1256 record->sample_address = true;
1257 callchain_debug(callchain);
1258 }
1259
1260 return ret;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001261}
1262
Kan Liangc421e802015-07-29 05:42:12 -04001263int record_parse_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001264 const char *arg,
1265 int unset)
1266{
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001267 return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
Jiri Olsa26d33022012-08-07 15:20:47 +02001268}
1269
Kan Liangc421e802015-07-29 05:42:12 -04001270int record_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001271 const char *arg __maybe_unused,
1272 int unset __maybe_unused)
1273{
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001274 struct callchain_param *callchain = opt->value;
Kan Liangc421e802015-07-29 05:42:12 -04001275
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001276 callchain->enabled = true;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001277
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001278 if (callchain->record_mode == CALLCHAIN_NONE)
1279 callchain->record_mode = CALLCHAIN_FP;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001280
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001281 callchain_debug(callchain);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001282 return 0;
1283}
1284
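/*
 * perfconfig hook: "record.build-id" accepts cache/no-cache/skip, and
 * "record.call-graph" is forwarded to "call-graph.record-mode".
 * A minimal ~/.perfconfig example (illustrative):
 *
 *	[record]
 *		build-id = no-cache
 *		call-graph = dwarf
 */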
Jiri Olsaeb853e82014-02-03 12:44:42 +01001285static int perf_record_config(const char *var, const char *value, void *cb)
1286{
Namhyung Kim7a29c082015-12-15 10:49:56 +09001287 struct record *rec = cb;
1288
1289 if (!strcmp(var, "record.build-id")) {
1290 if (!strcmp(value, "cache"))
1291 rec->no_buildid_cache = false;
1292 else if (!strcmp(value, "no-cache"))
1293 rec->no_buildid_cache = true;
1294 else if (!strcmp(value, "skip"))
1295 rec->no_buildid = true;
1296 else
1297 return -1;
1298 return 0;
1299 }
Yisheng Xiecff17202018-03-12 19:25:57 +08001300 if (!strcmp(var, "record.call-graph")) {
1301 var = "call-graph.record-mode";
1302 return perf_default_config(var, value, cb);
1303 }
Jiri Olsaeb853e82014-02-03 12:44:42 +01001304
Yisheng Xiecff17202018-03-12 19:25:57 +08001305 return 0;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001306}
1307
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001308struct clockid_map {
1309 const char *name;
1310 int clockid;
1311};
1312
1313#define CLOCKID_MAP(n, c) \
1314 { .name = n, .clockid = (c), }
1315
1316#define CLOCKID_END { .name = NULL, }
1317
1318
1319/*
1320 * Add the missing ones; we need to build on many distros...
1321 */
1322#ifndef CLOCK_MONOTONIC_RAW
1323#define CLOCK_MONOTONIC_RAW 4
1324#endif
1325#ifndef CLOCK_BOOTTIME
1326#define CLOCK_BOOTTIME 7
1327#endif
1328#ifndef CLOCK_TAI
1329#define CLOCK_TAI 11
1330#endif
1331
1332static const struct clockid_map clockids[] = {
1333 /* available for all events, NMI safe */
1334 CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
1335 CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),
1336
1337 /* available for some events */
1338 CLOCKID_MAP("realtime", CLOCK_REALTIME),
1339 CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
1340 CLOCKID_MAP("tai", CLOCK_TAI),
1341
1342 /* available for the lazy */
1343 CLOCKID_MAP("mono", CLOCK_MONOTONIC),
1344 CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
1345 CLOCKID_MAP("real", CLOCK_REALTIME),
1346 CLOCKID_MAP("boot", CLOCK_BOOTTIME),
1347
1348 CLOCKID_END,
1349};
1350
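/*
 * Query the resolution of the selected clock and remember it in
 * nanoseconds; a failure to read it only warns and leaves the
 * resolution at 0.
 */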
Alexey Budankovcf790512018-10-09 17:36:24 +03001351static int get_clockid_res(clockid_t clk_id, u64 *res_ns)
1352{
1353 struct timespec res;
1354
1355 *res_ns = 0;
1356 if (!clock_getres(clk_id, &res))
1357 *res_ns = res.tv_nsec + res.tv_sec * NSEC_PER_SEC;
1358 else
1359 pr_warning("WARNING: Failed to determine specified clock resolution.\n");
1360
1361 return 0;
1362}
1363
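/*
 * -k/--clockid accepts either a raw clockid number or one of the names in
 * the table above, case-insensitively and with an optional "CLOCK_" prefix.
 * Illustrative example: "perf record -k monotonic_raw -- ./workload".
 */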
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001364static int parse_clockid(const struct option *opt, const char *str, int unset)
1365{
1366 struct record_opts *opts = (struct record_opts *)opt->value;
1367 const struct clockid_map *cm;
1368 const char *ostr = str;
1369
1370 if (unset) {
1371 opts->use_clockid = 0;
1372 return 0;
1373 }
1374
1375 /* no arg passed */
1376 if (!str)
1377 return 0;
1378
1379	/* do not allow setting it twice */
1380 if (opts->use_clockid)
1381 return -1;
1382
1383 opts->use_clockid = true;
1384
1385	/* if it's a number, we're done */
1386 if (sscanf(str, "%d", &opts->clockid) == 1)
Alexey Budankovcf790512018-10-09 17:36:24 +03001387 return get_clockid_res(opts->clockid, &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001388
1389 /* allow a "CLOCK_" prefix to the name */
1390 if (!strncasecmp(str, "CLOCK_", 6))
1391 str += 6;
1392
1393 for (cm = clockids; cm->name; cm++) {
1394 if (!strcasecmp(str, cm->name)) {
1395 opts->clockid = cm->clockid;
Alexey Budankovcf790512018-10-09 17:36:24 +03001396 return get_clockid_res(opts->clockid,
1397 &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001398 }
1399 }
1400
1401 opts->use_clockid = false;
1402 ui__warning("unknown clockid %s, check man page\n", ostr);
1403 return -1;
1404}
1405
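/*
 * -m/--mmap-pages takes "pages[,pages]": the first value sizes the data
 * mmap, the optional second one the AUX area tracing mmap, e.g. "-m 512,128".
 * Both values are parsed by __perf_evlist__parse_mmap_pages().
 */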
Adrian Huntere9db1312015-04-09 18:53:46 +03001406static int record__parse_mmap_pages(const struct option *opt,
1407 const char *str,
1408 int unset __maybe_unused)
1409{
1410 struct record_opts *opts = opt->value;
1411 char *s, *p;
1412 unsigned int mmap_pages;
1413 int ret;
1414
1415 if (!str)
1416 return -EINVAL;
1417
1418 s = strdup(str);
1419 if (!s)
1420 return -ENOMEM;
1421
1422 p = strchr(s, ',');
1423 if (p)
1424 *p = '\0';
1425
1426 if (*s) {
1427 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
1428 if (ret)
1429 goto out_free;
1430 opts->mmap_pages = mmap_pages;
1431 }
1432
1433 if (!p) {
1434 ret = 0;
1435 goto out_free;
1436 }
1437
1438 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
1439 if (ret)
1440 goto out_free;
1441
1442 opts->auxtrace_mmap_pages = mmap_pages;
1443
1444out_free:
1445 free(s);
1446 return ret;
1447}
1448
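/*
 * Output switching is only checked when the kernel wakes us up to write,
 * so a size threshold below half the mmap buffer size is likely to be
 * overshot; warn about it.
 */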
Jiri Olsa0c582442017-01-09 10:51:59 +01001449static void switch_output_size_warn(struct record *rec)
1450{
1451 u64 wakeup_size = perf_evlist__mmap_size(rec->opts.mmap_pages);
1452 struct switch_output *s = &rec->switch_output;
1453
1454 wakeup_size /= 2;
1455
1456 if (s->size < wakeup_size) {
1457 char buf[100];
1458
1459 unit_number__scnprintf(buf, sizeof(buf), wakeup_size);
1460 pr_warning("WARNING: switch-output data size lower than "
1461 "wakeup kernel buffer size (%s) "
1462 "expect bigger perf.data sizes\n", buf);
1463 }
1464}
1465
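/*
 * --switch-output accepts "signal" (rotate on SIGUSR2), a size threshold
 * (B/K/M/G) or a time threshold (s/m/h/d); any of them implies timestamped
 * output file names. Illustrative example: "perf record --switch-output=1G -a".
 */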
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001466static int switch_output_setup(struct record *rec)
1467{
1468 struct switch_output *s = &rec->switch_output;
Jiri Olsadc0c6122017-01-09 10:51:58 +01001469 static struct parse_tag tags_size[] = {
1470 { .tag = 'B', .mult = 1 },
1471 { .tag = 'K', .mult = 1 << 10 },
1472 { .tag = 'M', .mult = 1 << 20 },
1473 { .tag = 'G', .mult = 1 << 30 },
1474 { .tag = 0 },
1475 };
Jiri Olsabfacbe32017-01-09 10:52:00 +01001476 static struct parse_tag tags_time[] = {
1477 { .tag = 's', .mult = 1 },
1478 { .tag = 'm', .mult = 60 },
1479 { .tag = 'h', .mult = 60*60 },
1480 { .tag = 'd', .mult = 60*60*24 },
1481 { .tag = 0 },
1482 };
Jiri Olsadc0c6122017-01-09 10:51:58 +01001483 unsigned long val;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001484
1485 if (!s->set)
1486 return 0;
1487
1488 if (!strcmp(s->str, "signal")) {
1489 s->signal = true;
1490 pr_debug("switch-output with SIGUSR2 signal\n");
Jiri Olsadc0c6122017-01-09 10:51:58 +01001491 goto enabled;
1492 }
1493
1494 val = parse_tag_value(s->str, tags_size);
1495 if (val != (unsigned long) -1) {
1496 s->size = val;
1497 pr_debug("switch-output with %s size threshold\n", s->str);
1498 goto enabled;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001499 }
1500
Jiri Olsabfacbe32017-01-09 10:52:00 +01001501 val = parse_tag_value(s->str, tags_time);
1502 if (val != (unsigned long) -1) {
1503 s->time = val;
1504 pr_debug("switch-output with %s time threshold (%lu seconds)\n",
1505 s->str, s->time);
1506 goto enabled;
1507 }
1508
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001509 return -1;
Jiri Olsadc0c6122017-01-09 10:51:58 +01001510
1511enabled:
1512 rec->timestamp_filename = true;
1513 s->enabled = true;
Jiri Olsa0c582442017-01-09 10:51:59 +01001514
1515 if (s->size && !rec->opts.no_buffering)
1516 switch_output_size_warn(rec);
1517
Jiri Olsadc0c6122017-01-09 10:51:58 +01001518 return 0;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001519}
1520
Namhyung Kime5b2c202014-10-23 00:15:46 +09001521static const char * const __record_usage[] = {
Mike Galbraith9e0967532009-05-28 16:25:34 +02001522 "perf record [<options>] [<command>]",
1523 "perf record [<options>] -- <command> [<options>]",
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001524 NULL
1525};
Namhyung Kime5b2c202014-10-23 00:15:46 +09001526const char * const *record_usage = __record_usage;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001527
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001528/*
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001529 * XXX Ideally this would be local to cmd_record() and passed to a record__new,
1530 * because we need to have access to it in record__exit, which is called
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001531 * after cmd_record() exits, but since record_options needs to be accessible to
1532 * builtin-script, leave it here.
1533 *
1534 * At least we don't touch it in all the other functions here directly.
1535 *
1536 * Just say no to tons of global variables, sigh.
1537 */
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001538static struct record record = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001539 .opts = {
Andi Kleen8affc2b2014-07-31 14:45:04 +08001540 .sample_time = true,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001541 .mmap_pages = UINT_MAX,
1542 .user_freq = UINT_MAX,
1543 .user_interval = ULLONG_MAX,
Arnaldo Carvalho de Melo447a6012012-05-22 13:14:18 -03001544 .freq = 4000,
Namhyung Kimd1cb9fc2012-05-16 18:45:49 +09001545 .target = {
1546 .uses_mmap = true,
Adrian Hunter3aa59392013-11-15 15:52:29 +02001547 .default_per_cpu = true,
Namhyung Kimd1cb9fc2012-05-16 18:45:49 +09001548 },
Kan Liang9d9cad72015-06-17 09:51:11 -04001549 .proc_map_timeout = 500,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001550 },
Namhyung Kime3d59112015-01-29 17:06:44 +09001551 .tool = {
1552 .sample = process_sample_event,
1553 .fork = perf_event__process_fork,
Adrian Huntercca84822015-08-19 17:29:21 +03001554 .exit = perf_event__process_exit,
Namhyung Kime3d59112015-01-29 17:06:44 +09001555 .comm = perf_event__process_comm,
Hari Bathinif3b36142017-03-08 02:11:43 +05301556 .namespaces = perf_event__process_namespaces,
Namhyung Kime3d59112015-01-29 17:06:44 +09001557 .mmap = perf_event__process_mmap,
1558 .mmap2 = perf_event__process_mmap2,
Adrian Huntercca84822015-08-19 17:29:21 +03001559 .ordered_events = true,
Namhyung Kime3d59112015-01-29 17:06:44 +09001560 },
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001561};
Frederic Weisbecker7865e812010-04-14 19:42:07 +02001562
Namhyung Kim76a26542015-10-22 23:28:32 +09001563const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
1564 "\n\t\t\t\tDefault: fp";
Arnaldo Carvalho de Melo61eaa3b2012-10-01 15:20:58 -03001565
Wang Nan0aab2132016-06-16 08:02:41 +00001566static bool dry_run;
1567
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001568/*
1569 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
1570 * with it and switch to use the library functions in perf_evlist that came
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03001571 * from builtin-record.c, i.e. use record_opts,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001572 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
1573 * using pipes, etc.
1574 */
Jiri Olsaefd21302017-01-03 09:19:55 +01001575static struct option __record_options[] = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001576 OPT_CALLBACK('e', "event", &record.evlist, "event",
Thomas Gleixner86847b62009-06-06 12:24:17 +02001577 "event selector. use 'perf list' to list available events",
Jiri Olsaf120f9d2011-07-14 11:25:32 +02001578 parse_events_option),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001579 OPT_CALLBACK(0, "filter", &record.evlist, "filter",
Li Zefanc171b552009-10-15 11:22:07 +08001580 "event filter", parse_filter),
Wang Nan4ba1faa2015-07-10 07:36:10 +00001581 OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
1582 NULL, "don't record events from perf itself",
1583 exclude_perf),
Namhyung Kimbea03402012-04-26 14:15:15 +09001584 OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03001585 "record events on existing process id"),
Namhyung Kimbea03402012-04-26 14:15:15 +09001586 OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03001587 "record events on existing thread id"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001588 OPT_INTEGER('r', "realtime", &record.realtime_prio,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001589 "collect data with this RT SCHED_FIFO priority"),
Arnaldo Carvalho de Melo509051e2014-01-14 17:52:14 -03001590 OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
Kirill Smelkovacac03f2011-01-12 17:59:36 +03001591 "collect data without buffering"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001592 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
Frederic Weisbeckerdaac07b2009-08-13 10:27:19 +02001593 "collect raw sample records from all opened counters"),
Namhyung Kimbea03402012-04-26 14:15:15 +09001594 OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001595 "system-wide collection from all CPUs"),
Namhyung Kimbea03402012-04-26 14:15:15 +09001596 OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
Stephane Eranianc45c6ea2010-05-28 12:00:01 +02001597 "list of cpus to monitor"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001598 OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
Jiri Olsaeae8ad82017-01-23 22:25:41 +01001599 OPT_STRING('o', "output", &record.data.file.path, "file",
Ingo Molnarabaff322009-06-02 22:59:57 +02001600 "output file name"),
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02001601 OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
1602 &record.opts.no_inherit_set,
1603 "child tasks do not inherit counters"),
Wang Nan4ea648a2016-07-14 08:34:47 +00001604 OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
1605 "synthesize non-sample events at the end of output"),
Wang Nan626a6b72016-07-14 08:34:45 +00001606 OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
Arnaldo Carvalho de Melob09c2362018-03-01 14:52:50 -03001607 OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
1608 "Fail if the specified frequency can't be used"),
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03001609 OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
1610 "profile at this frequency",
1611 record__parse_freq),
Adrian Huntere9db1312015-04-09 18:53:46 +03001612 OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
1613 "number of mmap data pages and AUX area tracing mmap pages",
1614 record__parse_mmap_pages),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001615 OPT_BOOLEAN(0, "group", &record.opts.group,
Lin Ming43bece72011-08-17 18:42:07 +08001616 "put the counters into a counter group"),
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001617 OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001618 NULL, "enables call-graph recording" ,
1619 &record_callchain_opt),
1620 OPT_CALLBACK(0, "call-graph", &record.opts,
Namhyung Kim76a26542015-10-22 23:28:32 +09001621 "record_mode[,record_size]", record_callchain_help,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001622 &record_parse_callchain_opt),
Ian Munsiec0555642010-04-13 18:37:33 +10001623 OPT_INCR('v', "verbose", &verbose,
Ingo Molnar3da297a2009-06-07 17:39:02 +02001624 "be more verbose (show counter open errors, etc)"),
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02001625 OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001626 OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001627 "per thread counts"),
Peter Zijlstra56100322015-06-10 16:48:50 +02001628 OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
Kan Liang3b0a5da2017-08-29 13:11:08 -04001629 OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
1630 "Record the sample physical addresses"),
Jiri Olsab6f35ed2016-08-01 20:02:35 +02001631 OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
Adrian Hunter3abebc52015-07-06 14:51:01 +03001632 OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
1633 &record.opts.sample_time_set,
1634 "Record the sample timestamps"),
Jiri Olsaf290aa12018-02-01 09:38:11 +01001635 OPT_BOOLEAN_SET('P', "period", &record.opts.period, &record.opts.period_set,
1636 "Record the sample period"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001637 OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001638 "don't sample"),
Wang Nand2db9a92016-01-25 09:56:19 +00001639 OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
1640 &record.no_buildid_cache_set,
1641 "do not update the buildid cache"),
1642 OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
1643 &record.no_buildid_set,
1644 "do not collect buildids in perf.data"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001645 OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
Stephane Eranian023695d2011-02-14 11:20:01 +02001646 "monitor event in cgroup name only",
1647 parse_cgroups),
Arnaldo Carvalho de Meloa6205a32014-01-14 17:58:12 -03001648 OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
Andi Kleen6619a532014-01-11 13:38:27 -08001649 "ms to wait before starting measurement after program start"),
Namhyung Kimbea03402012-04-26 14:15:15 +09001650 OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
1651 "user to profile"),
Stephane Eraniana5aabda2012-03-08 23:47:45 +01001652
1653 OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
1654 "branch any", "sample any taken branches",
1655 parse_branch_stack),
1656
1657 OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
1658 "branch filter mask", "branch stack filter modes",
Roberto Agostino Vitillobdfebd82012-02-09 23:21:02 +01001659 parse_branch_stack),
Andi Kleen05484292013-01-24 16:10:29 +01001660 OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
1661 "sample by weight (on special events only)"),
Andi Kleen475eeab2013-09-20 07:40:43 -07001662 OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
1663 "sample transaction flags (special events only)"),
Adrian Hunter3aa59392013-11-15 15:52:29 +02001664 OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
1665 "use per-thread mmaps"),
Stephane Eranianbcc84ec2015-08-31 18:41:12 +02001666 OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
1667 "sample selected machine registers on interrupt,"
1668 " use -I ? to list register names", parse_regs),
Andi Kleen84c41742017-09-05 10:00:28 -07001669 OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
1670 "sample selected machine registers on interrupt,"
1671		    " use --user-regs=? to list register names", parse_regs),
Andi Kleen85c273d2015-02-24 15:13:40 -08001672 OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
1673 "Record running/enabled time of read (:S) events"),
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001674 OPT_CALLBACK('k', "clockid", &record.opts,
1675 "clockid", "clockid to use for events, see clock_gettime()",
1676 parse_clockid),
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001677 OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
1678 "opts", "AUX area tracing Snapshot Mode", ""),
Kan Liang9d9cad72015-06-17 09:51:11 -04001679 OPT_UINTEGER(0, "proc-map-timeout", &record.opts.proc_map_timeout,
1680 "per thread proc mmap processing timeout in ms"),
Hari Bathinif3b36142017-03-08 02:11:43 +05301681 OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
1682 "Record namespaces events"),
Adrian Hunterb757bb02015-07-21 12:44:04 +03001683 OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
1684 "Record context switch events"),
Jiri Olsa85723882016-02-15 09:34:31 +01001685 OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
1686 "Configure all used events to run in kernel space.",
1687 PARSE_OPT_EXCLUSIVE),
1688 OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
1689 "Configure all used events to run in user space.",
1690 PARSE_OPT_EXCLUSIVE),
Wang Nan71dc23262015-10-14 12:41:19 +00001691 OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
1692 "clang binary to use for compiling BPF scriptlets"),
1693 OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
1694 "options passed to clang when compiling BPF scriptlets"),
He Kuang7efe0e02015-12-14 10:39:23 +00001695 OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
1696 "file", "vmlinux pathname"),
Namhyung Kim61566812016-01-11 22:37:09 +09001697 OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
1698 "Record build-id of all DSOs regardless of hits"),
Wang Nanecfd7a92016-04-13 08:21:07 +00001699 OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
1700 "append timestamp to output filename"),
Jin Yao68588ba2017-12-08 21:13:42 +08001701 OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
1702 "Record timestamp boundary (time of first/last samples)"),
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001703 OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
Jiri Olsabfacbe32017-01-09 10:52:00 +01001704 &record.switch_output.set, "signal,size,time",
1705 "Switch output when receive SIGUSR2 or cross size,time threshold",
Jiri Olsadc0c6122017-01-09 10:51:58 +01001706 "signal"),
Wang Nan0aab2132016-06-16 08:02:41 +00001707 OPT_BOOLEAN(0, "dry-run", &dry_run,
1708 "Parse options then exit"),
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001709 OPT_END()
1710};
1711
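/*
 * A typical invocation combining the options above (illustrative):
 *
 *	perf record -F 4000 -g --switch-output=1G -o perf.data -- ./workload
 */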
Namhyung Kime5b2c202014-10-23 00:15:46 +09001712struct option *record_options = __record_options;
1713
Arnaldo Carvalho de Melob0ad8ea2017-03-27 11:47:20 -03001714int cmd_record(int argc, const char **argv)
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001715{
Adrian Hunteref149c22015-04-09 18:53:45 +03001716 int err;
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001717 struct record *rec = &record;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001718 char errbuf[BUFSIZ];
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001719
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03001720 setlocale(LC_ALL, "");
1721
Wang Nan48e1cab2015-12-14 10:39:22 +00001722#ifndef HAVE_LIBBPF_SUPPORT
1723# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
1724 set_nobuild('\0', "clang-path", true);
1725 set_nobuild('\0', "clang-opt", true);
1726# undef set_nobuild
1727#endif
1728
He Kuang7efe0e02015-12-14 10:39:23 +00001729#ifndef HAVE_BPF_PROLOGUE
1730# if !defined (HAVE_DWARF_SUPPORT)
1731# define REASON "NO_DWARF=1"
1732# elif !defined (HAVE_LIBBPF_SUPPORT)
1733# define REASON "NO_LIBBPF=1"
1734# else
1735# define REASON "this architecture doesn't support BPF prologue"
1736# endif
1737# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
1738 set_nobuild('\0', "vmlinux", true);
1739# undef set_nobuild
1740# undef REASON
1741#endif
1742
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001743 rec->evlist = perf_evlist__new();
1744 if (rec->evlist == NULL)
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -02001745 return -ENOMEM;
1746
Arnaldo Carvalho de Meloecc4c562017-01-24 13:44:10 -03001747 err = perf_config(perf_record_config, rec);
1748 if (err)
1749 return err;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001750
Tom Zanussibca647a2010-11-10 08:11:30 -06001751 argc = parse_options(argc, argv, record_options, record_usage,
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02001752 PARSE_OPT_STOP_AT_NON_OPTION);
Namhyung Kim68ba3232017-02-17 17:17:42 +09001753 if (quiet)
1754 perf_quiet_option();
Jiri Olsa483635a2017-02-17 18:00:18 +01001755
1756 /* Make system wide (-a) the default target. */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001757 if (!argc && target__none(&rec->opts.target))
Jiri Olsa483635a2017-02-17 18:00:18 +01001758 rec->opts.target.system_wide = true;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001759
Namhyung Kimbea03402012-04-26 14:15:15 +09001760 if (nr_cgroups && !rec->opts.target.system_wide) {
Namhyung Kimc7118362015-10-25 00:49:27 +09001761 usage_with_options_msg(record_usage, record_options,
1762 "cgroup monitoring only available in system-wide mode");
1763
Stephane Eranian023695d2011-02-14 11:20:01 +02001764 }
Adrian Hunterb757bb02015-07-21 12:44:04 +03001765 if (rec->opts.record_switch_events &&
1766 !perf_can_record_switch_events()) {
Namhyung Kimc7118362015-10-25 00:49:27 +09001767 ui__error("kernel does not support recording context switch events\n");
1768 parse_options_usage(record_usage, record_options, "switch-events", 0);
1769 return -EINVAL;
Adrian Hunterb757bb02015-07-21 12:44:04 +03001770 }
Stephane Eranian023695d2011-02-14 11:20:01 +02001771
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001772 if (switch_output_setup(rec)) {
1773 parse_options_usage(record_usage, record_options, "switch-output", 0);
1774 return -EINVAL;
1775 }
1776
Jiri Olsabfacbe32017-01-09 10:52:00 +01001777 if (rec->switch_output.time) {
1778 signal(SIGALRM, alarm_sig_handler);
1779 alarm(rec->switch_output.time);
1780 }
1781
Adrian Hunter1b36c032016-09-23 17:38:39 +03001782 /*
1783 * Allow aliases to facilitate the lookup of symbols for address
1784 * filters. Refer to auxtrace_parse_filters().
1785 */
1786 symbol_conf.allow_aliases = true;
1787
1788 symbol__init(NULL);
1789
Adrian Hunter4b5ea3b2018-03-06 11:13:12 +02001790 err = record__auxtrace_init(rec);
Adrian Hunter1b36c032016-09-23 17:38:39 +03001791 if (err)
1792 goto out;
1793
Wang Nan0aab2132016-06-16 08:02:41 +00001794 if (dry_run)
Adrian Hunter5c01ad602016-09-23 17:38:37 +03001795 goto out;
Wang Nan0aab2132016-06-16 08:02:41 +00001796
Wang Nand7888572016-04-08 15:07:24 +00001797 err = bpf__setup_stdout(rec->evlist);
1798 if (err) {
1799 bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
1800 pr_err("ERROR: Setup BPF stdout failed: %s\n",
1801 errbuf);
Adrian Hunter5c01ad602016-09-23 17:38:37 +03001802 goto out;
Wang Nand7888572016-04-08 15:07:24 +00001803 }
1804
Adrian Hunteref149c22015-04-09 18:53:45 +03001805 err = -ENOMEM;
1806
Arnaldo Carvalho de Melo6c443952017-11-14 11:03:19 -03001807 if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(rec->evlist))
Arnaldo Carvalho de Melo646aaea2011-05-27 11:00:41 -03001808 pr_warning(
1809"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
1810"check /proc/sys/kernel/kptr_restrict.\n\n"
1811"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
1812"file is not found in the buildid cache or in the vmlinux path.\n\n"
1813"Samples in kernel modules won't be resolved at all.\n\n"
1814"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
1815"even with a suitable vmlinux or kallsyms file.\n\n");
Arnaldo Carvalho de Meloec80fde2011-05-26 09:53:51 -03001816
Wang Nan0c1d46a2016-04-20 18:59:52 +00001817 if (rec->no_buildid_cache || rec->no_buildid) {
Stephane Eraniana1ac1d32010-06-17 11:39:01 +02001818 disable_buildid_cache();
Jiri Olsadc0c6122017-01-09 10:51:58 +01001819 } else if (rec->switch_output.enabled) {
Wang Nan0c1d46a2016-04-20 18:59:52 +00001820 /*
1821 * In 'perf record --switch-output', disable buildid
1822 * generation by default to reduce data file switching
1823		 * overhead. Still generate buildids if they are required
1824 * explicitly using
1825 *
Jiri Olsa60437ac2017-01-03 09:19:56 +01001826 * perf record --switch-output --no-no-buildid \
Wang Nan0c1d46a2016-04-20 18:59:52 +00001827 * --no-no-buildid-cache
1828 *
1829 * Following code equals to:
1830 *
1831 * if ((rec->no_buildid || !rec->no_buildid_set) &&
1832 * (rec->no_buildid_cache || !rec->no_buildid_cache_set))
1833 * disable_buildid_cache();
1834 */
1835 bool disable = true;
1836
1837 if (rec->no_buildid_set && !rec->no_buildid)
1838 disable = false;
1839 if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
1840 disable = false;
1841 if (disable) {
1842 rec->no_buildid = true;
1843 rec->no_buildid_cache = true;
1844 disable_buildid_cache();
1845 }
1846 }
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02001847
Wang Nan4ea648a2016-07-14 08:34:47 +00001848 if (record.opts.overwrite)
1849 record.opts.tail_synthesize = true;
1850
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001851 if (rec->evlist->nr_entries == 0 &&
Arnaldo Carvalho de Melo4b4cd502017-07-03 13:26:32 -03001852 __perf_evlist__add_default(rec->evlist, !record.opts.no_samples) < 0) {
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02001853 pr_err("Not enough memory for event selector list\n");
Adrian Hunter394c01e2016-09-23 17:38:36 +03001854 goto out;
Peter Zijlstrabbd36e52009-06-11 23:11:50 +02001855 }
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001856
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02001857 if (rec->opts.target.tid && !rec->opts.no_inherit_set)
1858 rec->opts.no_inherit = true;
1859
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001860 err = target__validate(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001861 if (err) {
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001862 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Jiri Olsac3dec272018-02-06 19:17:58 +01001863 ui__warning("%s\n", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001864 }
Namhyung Kim4bd0f2d2012-04-26 14:15:18 +09001865
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001866 err = target__parse_uid(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001867 if (err) {
1868 int saved_errno = errno;
1869
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001870 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Namhyung Kim3780f482012-05-29 13:22:57 +09001871 ui__error("%s", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001872
1873 err = -saved_errno;
Adrian Hunter394c01e2016-09-23 17:38:36 +03001874 goto out;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001875 }
Arnaldo Carvalho de Melo0d37aa32012-01-19 14:08:15 -02001876
Mengting Zhangca800062017-12-13 15:01:53 +08001877	/* Enable ignoring missing threads when the -u or -p option is given. */
1878 rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;
Jiri Olsa23dc4f12016-12-12 11:35:43 +01001879
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001880 err = -ENOMEM;
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001881 if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
Arnaldo Carvalho de Melodd7927f2011-01-12 14:28:51 -02001882 usage_with_options(record_usage, record_options);
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02001883
Adrian Hunteref149c22015-04-09 18:53:45 +03001884 err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
1885 if (err)
Adrian Hunter394c01e2016-09-23 17:38:36 +03001886 goto out;
Adrian Hunteref149c22015-04-09 18:53:45 +03001887
Namhyung Kim61566812016-01-11 22:37:09 +09001888 /*
1889 * We take all buildids when the file contains
1890 * AUX area tracing data because we do not decode the
1891	 * trace, as it would take too long.
1892 */
1893 if (rec->opts.full_auxtrace)
1894 rec->buildid_all = true;
1895
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03001896 if (record_opts__config(&rec->opts)) {
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001897 err = -EINVAL;
Adrian Hunter394c01e2016-09-23 17:38:36 +03001898 goto out;
Mike Galbraith7e4ff9e2009-10-12 07:56:03 +02001899 }
1900
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001901 err = __cmd_record(&record, argc, argv);
Adrian Hunter394c01e2016-09-23 17:38:36 +03001902out:
Namhyung Kim45604712014-05-12 09:47:24 +09001903 perf_evlist__delete(rec->evlist);
Arnaldo Carvalho de Melod65a4582010-07-30 18:31:28 -03001904 symbol__exit();
Adrian Hunteref149c22015-04-09 18:53:45 +03001905 auxtrace_record__free(rec->itr);
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001906 return err;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001907}
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001908
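/*
 * SIGUSR2 handler: kick off an AUX area snapshot when -S/--snapshot is
 * armed, and/or request an output switch when --switch-output=signal
 * was asked for.
 */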
1909static void snapshot_sig_handler(int sig __maybe_unused)
1910{
Jiri Olsadc0c6122017-01-09 10:51:58 +01001911 struct record *rec = &record;
1912
Wang Nan5f9cf592016-04-20 18:59:49 +00001913 if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
1914 trigger_hit(&auxtrace_snapshot_trigger);
1915 auxtrace_record__snapshot_started = 1;
1916 if (auxtrace_record__snapshot_start(record.itr))
1917 trigger_error(&auxtrace_snapshot_trigger);
1918 }
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001919
Jiri Olsadc0c6122017-01-09 10:51:58 +01001920 if (switch_output_signal(rec))
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001921 trigger_hit(&switch_output_trigger);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001922}
Jiri Olsabfacbe32017-01-09 10:52:00 +01001923
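/*
 * SIGALRM handler armed in cmd_record(): fires when the --switch-output
 * time threshold elapses and requests an output switch.
 */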
1924static void alarm_sig_handler(int sig __maybe_unused)
1925{
1926 struct record *rec = &record;
1927
1928 if (switch_output_time(rec))
1929 trigger_hit(&switch_output_trigger);
1930}