blob: a468d882e74f3c66a8703891d0c7f3805d07d886 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Ingo Molnarabaff322009-06-02 22:59:57 +02002/*
Ingo Molnarbf9e1872009-06-02 23:37:05 +02003 * builtin-record.c
4 *
5 * Builtin record command: Record the profile of a workload
6 * (or a CPU, or a PID) into the perf.data output file - for
7 * later analysis via perf report.
Ingo Molnarabaff322009-06-02 22:59:57 +02008 */
Ingo Molnar16f762a2009-05-27 09:10:38 +02009#include "builtin.h"
Ingo Molnarbf9e1872009-06-02 23:37:05 +020010
11#include "perf.h"
12
Arnaldo Carvalho de Melo6122e4e2010-02-03 16:52:05 -020013#include "util/build-id.h"
Thomas Gleixner6eda5832009-05-01 18:29:57 +020014#include "util/util.h"
Josh Poimboeuf4b6ab942015-12-15 09:39:39 -060015#include <subcmd/parse-options.h>
Ingo Molnar8ad8db32009-05-26 11:10:09 +020016#include "util/parse-events.h"
Taeung Song41840d22016-06-23 17:55:17 +090017#include "util/config.h"
Thomas Gleixner6eda5832009-05-01 18:29:57 +020018
Arnaldo Carvalho de Melo8f651ea2014-10-09 16:12:24 -030019#include "util/callchain.h"
Arnaldo Carvalho de Melof14d5702014-10-17 12:17:40 -030020#include "util/cgroup.h"
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020021#include "util/header.h"
Frederic Weisbecker66e274f2009-08-12 11:07:25 +020022#include "util/event.h"
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -020023#include "util/evlist.h"
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -020024#include "util/evsel.h"
Frederic Weisbecker8f288272009-08-16 22:05:48 +020025#include "util/debug.h"
Arnaldo Carvalho de Melo94c744b2009-12-11 21:24:02 -020026#include "util/session.h"
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -020027#include "util/tool.h"
Arnaldo Carvalho de Melo8d063672009-11-04 18:50:43 -020028#include "util/symbol.h"
Paul Mackerrasa12b51c2010-03-10 20:36:09 +110029#include "util/cpumap.h"
Arnaldo Carvalho de Melofd782602011-01-18 15:15:24 -020030#include "util/thread_map.h"
Jiri Olsaf5fc14122013-10-15 16:27:32 +020031#include "util/data.h"
Stephane Eranianbcc84ec2015-08-31 18:41:12 +020032#include "util/perf_regs.h"
Adrian Hunteref149c22015-04-09 18:53:45 +030033#include "util/auxtrace.h"
Adrian Hunter46bc29b2016-03-08 10:38:44 +020034#include "util/tsc.h"
Andi Kleenf00898f2015-05-27 10:51:51 -070035#include "util/parse-branch-options.h"
Stephane Eranianbcc84ec2015-08-31 18:41:12 +020036#include "util/parse-regs-options.h"
Wang Nan71dc23262015-10-14 12:41:19 +000037#include "util/llvm-utils.h"
Wang Nan8690a2a2016-02-22 09:10:32 +000038#include "util/bpf-loader.h"
Wang Nan5f9cf592016-04-20 18:59:49 +000039#include "util/trigger.h"
Wang Nana0748652016-11-26 07:03:28 +000040#include "util/perf-hooks.h"
Alexey Budankovf13de662019-01-22 20:50:57 +030041#include "util/cpu-set-sched.h"
Arnaldo Carvalho de Meloc5e40272017-04-19 16:12:39 -030042#include "util/time-utils.h"
Arnaldo Carvalho de Melo58db1d62017-04-19 16:05:56 -030043#include "util/units.h"
Song Liu7b612e22019-01-17 08:15:19 -080044#include "util/bpf-event.h"
Wang Nand8871ea2016-02-26 09:32:06 +000045#include "asm/bug.h"
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020046
Arnaldo Carvalho de Meloa43783a2017-04-18 10:46:11 -030047#include <errno.h>
Arnaldo Carvalho de Melofd20e812017-04-17 15:23:08 -030048#include <inttypes.h>
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -030049#include <locale.h>
Arnaldo Carvalho de Melo42087352017-04-19 19:06:30 -030050#include <poll.h>
Peter Zijlstra97124d5e2009-06-02 15:52:24 +020051#include <unistd.h>
Peter Zijlstrade9ac072009-04-08 15:01:31 +020052#include <sched.h>
Arnaldo Carvalho de Melo9607ad32017-04-19 15:49:18 -030053#include <signal.h>
Arnaldo Carvalho de Meloa41794c2010-05-18 18:29:23 -030054#include <sys/mman.h>
Arnaldo Carvalho de Melo42087352017-04-19 19:06:30 -030055#include <sys/wait.h>
Arnaldo Carvalho de Melo0693e682016-08-08 15:05:46 -030056#include <linux/time64.h>
Bernhard Rosenkraenzer78da39f2012-10-08 09:43:26 +030057
/*
 * Configuration for switching to a new perf.data output file mid-session
 * (--switch-output): by signal, by written size, or by elapsed time.
 */
struct switch_output {
	bool		 enabled;	/* any switch-output mode is active */
	bool		 signal;	/* switch on SIGUSR2 */
	unsigned long	 size;		/* switch after this many bytes written */
	unsigned long	 time;		/* switch after this many seconds */
	const char	*str;		/* raw --switch-output argument string */
	bool		 set;		/* option was given on the command line */
};
66
/* Top-level state for one 'perf record' session. */
struct record {
	struct perf_tool	tool;		/* event-processing callbacks */
	struct record_opts	opts;		/* parsed command-line options */
	u64			bytes_written;	/* bytes written to perf.data so far */
	struct perf_data	data;		/* output file abstraction */
	struct auxtrace_record	*itr;		/* AUX area tracing state, if any */
	struct perf_evlist	*evlist;	/* events being recorded */
	struct perf_session	*session;
	int			realtime_prio;	/* SCHED_FIFO priority, 0 = off */
	bool			no_buildid;
	bool			no_buildid_set;	/* no_buildid came from cmdline/config */
	bool			no_buildid_cache;
	bool			no_buildid_cache_set;
	bool			buildid_all;	/* mark all DSOs, not just the hit ones */
	bool			timestamp_filename; /* append timestamp to output name */
	bool			timestamp_boundary; /* record first/last sample times */
	struct switch_output	switch_output;	/* output file switching config */
	unsigned long long	samples;	/* samples seen since last report */
	cpu_set_t		affinity_mask;	/* current thread affinity (--affinity) */
};
Ingo Molnara21ca2c2009-06-06 09:58:57 +020087
static volatile int auxtrace_record__snapshot_started;
/* Triggers coordinating snapshot / output-switch requests from signal context. */
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
static DEFINE_TRIGGER(switch_output_trigger);

/* Human-readable names for the --affinity modes, indexed by enum value. */
static const char *affinity_tags[PERF_AFFINITY_MAX] = {
	"SYS", "NODE", "CPU"
};
95
Jiri Olsadc0c6122017-01-09 10:51:58 +010096static bool switch_output_signal(struct record *rec)
97{
98 return rec->switch_output.signal &&
99 trigger_is_ready(&switch_output_trigger);
100}
101
102static bool switch_output_size(struct record *rec)
103{
104 return rec->switch_output.size &&
105 trigger_is_ready(&switch_output_trigger) &&
106 (rec->bytes_written >= rec->switch_output.size);
107}
108
Jiri Olsabfacbe32017-01-09 10:52:00 +0100109static bool switch_output_time(struct record *rec)
110{
111 return rec->switch_output.time &&
112 trigger_is_ready(&switch_output_trigger);
113}
114
/*
 * Write @size bytes from @bf to the perf.data output file, account the
 * bytes written, and fire the switch-output trigger once the configured
 * size threshold is crossed.  Returns 0 on success, -1 on write error.
 */
static int record__write(struct record *rec, struct perf_mmap *map __maybe_unused,
			 void *bf, size_t size)
{
	struct perf_data_file *file = &rec->session->data->file;

	if (perf_data_file__write(file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	rec->bytes_written += size;

	/* Request an output file switch once enough data was written. */
	if (switch_output_size(rec))
		trigger_hit(&switch_output_trigger);

	return 0;
}
132
/*
 * Queue an asynchronous write of @size bytes from @buf at file offset
 * @off on @trace_fd.  Busy-retries while aio_write() fails with EAGAIN
 * (kernel AIO queue temporarily full); on any other error the cblock is
 * marked free (aio_fildes = -1) and an error is logged.
 * Returns 0 when the request was queued, -1 on error.
 */
static int record__aio_write(struct aiocb *cblock, int trace_fd,
		void *buf, size_t size, off_t off)
{
	int rc;

	cblock->aio_fildes = trace_fd;
	cblock->aio_buf = buf;
	cblock->aio_nbytes = size;
	cblock->aio_offset = off;
	/* No signal/thread notification: completion is polled via aio_error(). */
	cblock->aio_sigevent.sigev_notify = SIGEV_NONE;

	do {
		rc = aio_write(cblock);
		if (rc == 0) {
			break;
		} else if (errno != EAGAIN) {
			cblock->aio_fildes = -1;
			pr_err("failed to queue perf data, error: %m\n");
			break;
		}
	} while (1);

	return rc;
}
158
/*
 * Poll the completion state of one queued aio write.  Returns:
 *   0 - still in progress, or a short write was re-queued for the
 *       remaining bytes (cblock stays busy)
 *   1 - fully complete; the cblock is freed and the mmap reference
 *       taken in perf_mmap__push() is dropped
 */
static int record__aio_complete(struct perf_mmap *md, struct aiocb *cblock)
{
	void *rem_buf;
	off_t rem_off;
	size_t rem_size;
	int rc, aio_errno;
	ssize_t aio_ret, written;

	aio_errno = aio_error(cblock);
	if (aio_errno == EINPROGRESS)
		return 0;

	written = aio_ret = aio_return(cblock);
	if (aio_ret < 0) {
		if (aio_errno != EINTR)
			pr_err("failed to write perf data, error: %m\n");
		/* Treat the failed request as a 0-byte write and retry all of it. */
		written = 0;
	}

	rem_size = cblock->aio_nbytes - written;

	if (rem_size == 0) {
		cblock->aio_fildes = -1;
		/*
		 * md->refcount is incremented in perf_mmap__push() for
		 * every enqueued aio write request so decrement it because
		 * the request is now complete.
		 */
		perf_mmap__put(md);
		rc = 1;
	} else {
		/*
		 * aio write request may require restart with the
		 * reminder if the kernel didn't write whole
		 * chunk at once.
		 *
		 * NOTE(review): the record__aio_write() return value is
		 * ignored here; if the re-queue fails the cblock is marked
		 * free but the mmap reference appears not to be dropped —
		 * confirm whether that leak is acceptable on this path.
		 */
		rem_off = cblock->aio_offset + written;
		rem_buf = (void *)(cblock->aio_buf + written);
		record__aio_write(cblock, cblock->aio_fildes,
				rem_buf, rem_size, rem_off);
		rc = 0;
	}

	return rc;
}
204
/*
 * Wait on the aio control blocks of @md.  With @sync_all, block until
 * every in-flight request has completed and return -1.  Otherwise
 * return the index of the first free cblock as soon as one becomes
 * available, suspending in 1ms slices while all are busy.
 */
static int record__aio_sync(struct perf_mmap *md, bool sync_all)
{
	struct aiocb **aiocb = md->aio.aiocb;
	struct aiocb *cblocks = md->aio.cblocks;
	struct timespec timeout = { 0, 1000 * 1000 * 1 }; /* 1ms */
	int i, do_suspend;

	do {
		do_suspend = 0;
		for (i = 0; i < md->aio.nr_cblocks; ++i) {
			if (cblocks[i].aio_fildes == -1 || record__aio_complete(md, &cblocks[i])) {
				if (sync_all)
					aiocb[i] = NULL;	/* done; stop waiting on it */
				else
					return i;		/* free slot for the caller */
			} else {
				/*
				 * Started aio write is not complete yet
				 * so it has to be waited before the
				 * next allocation.
				 */
				aiocb[i] = &cblocks[i];
				do_suspend = 1;
			}
		}
		if (!do_suspend)
			return -1;	/* sync_all: everything has completed */

		while (aio_suspend((const struct aiocb **)aiocb, md->aio.nr_cblocks, &timeout)) {
			if (!(errno == EAGAIN || errno == EINTR))
				pr_err("failed to sync perf data, error: %m\n");
		}
	} while (1);
}
239
/*
 * perf_mmap__aio_push() callback: queue @bf for asynchronous write at
 * offset @off, accounting the bytes and checking the switch-output
 * size threshold on success.  Returns record__aio_write()'s result.
 */
static int record__aio_pushfn(void *to, struct aiocb *cblock, void *bf, size_t size, off_t off)
{
	struct record *rec = to;
	int ret, trace_fd = rec->session->data->file.fd;

	rec->samples++;

	ret = record__aio_write(cblock, trace_fd, bf, size, off);
	if (!ret) {
		/* Bytes are counted at queue time, not at aio completion. */
		rec->bytes_written += size;
		if (switch_output_size(rec))
			trigger_hit(&switch_output_trigger);
	}

	return ret;
}
256
/* Current file position of @trace_fd; aio writes bypass the fd offset. */
static off_t record__aio_get_pos(int trace_fd)
{
	return lseek(trace_fd, 0, SEEK_CUR);
}
261
/* Restore @trace_fd's file position after positioned (pwrite-style) aio. */
static void record__aio_set_pos(int trace_fd, off_t pos)
{
	lseek(trace_fd, pos, SEEK_SET);
}
266
/*
 * Wait for all in-flight aio writes on every mapped ring buffer.
 * No-op unless AIO trace streaming (--aio) is enabled.
 */
static void record__aio_mmap_read_sync(struct record *rec)
{
	int i;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_mmap *maps = evlist->mmap;

	if (!rec->opts.nr_cblocks)
		return;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		struct perf_mmap *map = &maps[i];

		if (map->base)
			record__aio_sync(map, true);
	}
}
283
static int nr_cblocks_default = 1;	/* --aio given without a value */
static int nr_cblocks_max = 4;		/* upper bound for --aio=n; presumably enforced by the caller — confirm */
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300286
/*
 * Option callback for --aio[=n].  Unsetting disables AIO (0 cblocks);
 * an absent or zero value falls back to nr_cblocks_default.  Note that
 * strtol() errors are not detected here, so non-numeric input also
 * yields the default.
 */
static int record__aio_parse(const struct option *opt,
			     const char *str,
			     int unset)
{
	struct record_opts *opts = (struct record_opts *)opt->value;

	if (unset) {
		opts->nr_cblocks = 0;
	} else {
		if (str)
			opts->nr_cblocks = strtol(str, NULL, 0);
		if (!opts->nr_cblocks)
			opts->nr_cblocks = nr_cblocks_default;
	}

	return 0;
}
#else /* HAVE_AIO_SUPPORT */

/*
 * Stubs for builds without POSIX AIO.  record__aio_enabled() is always
 * false in this configuration (nr_cblocks stays 0), so these bodies
 * should never be reached at run time; they only satisfy the linker.
 */
static int nr_cblocks_max = 0;

static int record__aio_sync(struct perf_mmap *md __maybe_unused, bool sync_all __maybe_unused)
{
	return -1;
}

static int record__aio_pushfn(void *to __maybe_unused, struct aiocb *cblock __maybe_unused,
		void *bf __maybe_unused, size_t size __maybe_unused, off_t off __maybe_unused)
{
	return -1;
}

static off_t record__aio_get_pos(int trace_fd __maybe_unused)
{
	return -1;
}

static void record__aio_set_pos(int trace_fd __maybe_unused, off_t pos __maybe_unused)
{
}

static void record__aio_mmap_read_sync(struct record *rec __maybe_unused)
{
}
#endif
331
332static int record__aio_enabled(struct record *rec)
333{
334 return rec->opts.nr_cblocks > 0;
335}
336
/*
 * perf_tool callback for synthesized events: write the raw event
 * straight to the output file, ignoring sample/machine context.
 */
static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, NULL, event, event->header.size);
}
345
/* perf_mmap__push() callback: count a drained chunk and write it out. */
static int record__pushfn(struct perf_mmap *map, void *to, void *bf, size_t size)
{
	struct record *rec = to;

	rec->samples++;
	return record__write(rec, map, bf, size);
}
353
static volatile int done;		/* main-loop exit flag, set from signal context */
static volatile int signr = -1;		/* terminating signal, re-raised at exit */
static volatile int child_finished;	/* workload child exited (SIGCHLD seen) */

/*
 * Common signal handler: remember what happened and ask the record
 * loop to wind down.  Only async-signal-safe operations here.
 */
static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
}
367
/* On SIGSEGV: let perf hooks clean up first, then dump a stack trace. */
static void sigsegv_handler(int sig)
{
	perf_hooks__recover();
	sighandler_dump_stack(sig);
}
373
/*
 * atexit handler: if we are exiting because of a signal, re-raise it
 * with the default disposition so the exit status reflects the signal.
 */
static void record__sig_exit(void)
{
	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	raise(signr);
}
382
Adrian Huntere31f0d02015-04-30 17:37:27 +0300383#ifdef HAVE_AUXTRACE_SUPPORT
384
/*
 * Write one AUX area tracing event plus its payload (possibly split in
 * two chunks by the ring-buffer wrap) to the output, padded to an
 * 8-byte boundary.  For seekable single-file output the event's file
 * offset is also recorded in the auxtrace index.
 */
static int record__process_auxtrace(struct perf_tool *tool,
				    struct perf_mmap *map,
				    union perf_event *event, void *data1,
				    size_t len1, void *data2, size_t len2)
{
	struct record *rec = container_of(tool, struct record, tool);
	struct perf_data *data = &rec->data;
	size_t padding;
	u8 pad[8] = {0};

	if (!perf_data__is_pipe(data) && !perf_data__is_dir(data)) {
		off_t file_offset;
		int fd = perf_data__fd(data);
		int err;

		file_offset = lseek(fd, 0, SEEK_CUR);
		if (file_offset == -1)
			return -1;
		err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
						     event, file_offset);
		if (err)
			return err;
	}

	/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
	padding = (len1 + len2) & 7;
	if (padding)
		padding = 8 - padding;

	record__write(rec, map, event, event->header.size);
	record__write(rec, map, data1, len1);
	if (len2)
		record__write(rec, map, data2, len2);
	record__write(rec, map, &pad, padding);

	return 0;
}
422
/*
 * Drain pending AUX area data from @map into the output; counts one
 * sample when data was actually read.  Returns 0 or a negative error.
 */
static int record__auxtrace_mmap_read(struct record *rec,
				      struct perf_mmap *map)
{
	int ret;

	ret = auxtrace_mmap__read(map, rec->itr, &rec->tool,
				  record__process_auxtrace);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}
438
/*
 * Snapshot-mode variant of record__auxtrace_mmap_read(): reads at most
 * auxtrace_snapshot_size bytes from @map.  Returns 0 or a negative error.
 */
static int record__auxtrace_mmap_read_snapshot(struct record *rec,
					       struct perf_mmap *map)
{
	int ret;

	ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool,
					   record__process_auxtrace,
					   rec->opts.auxtrace_snapshot_size);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}
455
456static int record__auxtrace_read_snapshot_all(struct record *rec)
457{
458 int i;
459 int rc = 0;
460
461 for (i = 0; i < rec->evlist->nr_mmaps; i++) {
Jiri Olsae035f4c2018-09-13 14:54:05 +0200462 struct perf_mmap *map = &rec->evlist->mmap[i];
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300463
Jiri Olsae035f4c2018-09-13 14:54:05 +0200464 if (!map->auxtrace_mmap.base)
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300465 continue;
466
Jiri Olsae035f4c2018-09-13 14:54:05 +0200467 if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300468 rc = -1;
469 goto out;
470 }
471 }
472out:
473 return rc;
474}
475
/*
 * Handle a requested AUX snapshot: read all AUX data, then mark the
 * snapshot trigger as errored or re-arm it depending on the outcome.
 */
static void record__read_auxtrace_snapshot(struct record *rec)
{
	pr_debug("Recording AUX area tracing snapshot\n");
	if (record__auxtrace_read_snapshot_all(rec) < 0) {
		trigger_error(&auxtrace_snapshot_trigger);
	} else {
		if (auxtrace_record__snapshot_finish(rec->itr))
			trigger_error(&auxtrace_snapshot_trigger);
		else
			trigger_ready(&auxtrace_snapshot_trigger);
	}
}
488
/*
 * Initialize AUX area tracing: allocate the recording state if the
 * evlist needs it, then parse the snapshot options and event filters.
 * Returns 0 on success or a negative error.
 */
static int record__auxtrace_init(struct record *rec)
{
	int err;

	if (!rec->itr) {
		rec->itr = auxtrace_record__init(rec->evlist, &err);
		if (err)
			return err;
	}

	err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
					      rec->opts.auxtrace_snapshot_opts);
	if (err)
		return err;

	return auxtrace_parse_filters(rec->evlist);
}
506
Adrian Huntere31f0d02015-04-30 17:37:27 +0300507#else
508
509static inline
510int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
Jiri Olsae035f4c2018-09-13 14:54:05 +0200511 struct perf_mmap *map __maybe_unused)
Adrian Huntere31f0d02015-04-30 17:37:27 +0300512{
513 return 0;
514}
515
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300516static inline
517void record__read_auxtrace_snapshot(struct record *rec __maybe_unused)
518{
519}
520
521static inline
522int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
523{
524 return 0;
525}
526
Adrian Hunter4b5ea3b2018-03-06 11:13:12 +0200527static int record__auxtrace_init(struct record *rec __maybe_unused)
528{
529 return 0;
530}
531
Adrian Huntere31f0d02015-04-30 17:37:27 +0300532#endif
533
/*
 * mmap the ring buffers for @evlist according to the record options,
 * translating EPERM into a hint about perf_event_mlock_kb.  Also builds
 * the cpu/node map when a NUMA-aware --affinity mode is in use.
 * Returns 0 on success or a negative errno-style value.
 */
static int record__mmap_evlist(struct record *rec,
			       struct perf_evlist *evlist)
{
	struct record_opts *opts = &rec->opts;
	char msg[512];

	if (opts->affinity != PERF_AFFINITY_SYS)
		cpu__setup_cpunode_map();

	if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
				 opts->auxtrace_mmap_pages,
				 opts->auxtrace_snapshot_mode,
				 opts->nr_cblocks, opts->affinity) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u,%u)\n",
			       opts->mmap_pages, opts->auxtrace_mmap_pages);
			return -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno,
				str_error_r(errno, msg, sizeof(msg)));
			if (errno)
				return -errno;
			else
				return -EINVAL;
		}
	}
	return 0;
}
566
/* Convenience wrapper: mmap the session's own event list. */
static int record__mmap(struct record *rec)
{
	return record__mmap_evlist(rec, rec->evlist);
}
571
/*
 * Open all events of the session's evlist, retrying with fallback
 * configurations or by dropping weak group members when the kernel
 * rejects an event, then apply event filters and mmap the buffers.
 * Returns 0 on success or a negative error.
 */
static int record__open(struct record *rec)
{
	char msg[BUFSIZ];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	int rc = 0;

	/*
	 * For initial_delay we need to add a dummy event so that we can track
	 * PERF_RECORD_MMAP while we wait for the initial delay to enable the
	 * real events, the ones asked by the user.
	 */
	if (opts->initial_delay) {
		if (perf_evlist__add_dummy(evlist))
			return -ENOMEM;

		/* Dummy event tracks immediately; real events wait for exec. */
		pos = perf_evlist__first(evlist);
		pos->tracking = 0;
		pos = perf_evlist__last(evlist);
		pos->tracking = 1;
		pos->attr.enable_on_exec = 1;
	}

	perf_evlist__config(evlist, opts, &callchain_param);

	evlist__for_each_entry(evlist, pos) {
try_again:
		if (perf_evsel__open(pos, pos->cpus, pos->threads) < 0) {
			/* Fallback may downgrade the event config (e.g. to software). */
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose > 0)
					ui__warning("%s\n", msg);
				goto try_again;
			}
			/* Weak group: retry with the group broken up. */
			if ((errno == EINVAL || errno == EBADF) &&
			    pos->leader != pos &&
			    pos->weak_group) {
				pos = perf_evlist__reset_weak_group(evlist, pos);
				goto try_again;
			}
			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}

		pos->supported = true;
	}

	if (perf_evlist__apply_filters(evlist, &pos)) {
		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
			pos->filter, perf_evsel__name(pos), errno,
			str_error_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	rc = record__mmap(rec);
	if (rc)
		goto out;

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}
640
/*
 * Per-sample callback used while post-processing perf.data for
 * build-ids: tracks first/last sample timestamps and, unless
 * --buildid-all was given, marks the DSOs hit by this sample.
 */
static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	struct record *rec = container_of(tool, struct record, tool);

	if (rec->evlist->first_sample_time == 0)
		rec->evlist->first_sample_time = sample->time;

	rec->evlist->last_sample_time = sample->time;

	/* With --buildid-all every DSO is marked anyway; skip the hit walk. */
	if (rec->buildid_all)
		return 0;

	rec->samples++;
	return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
}
660
/*
 * Re-read the freshly written perf.data to collect the build-ids of
 * the DSOs that were hit, so they can be stored in the file header.
 */
static int process_buildids(struct record *rec)
{
	struct perf_session *session = rec->session;

	/* Nothing recorded, nothing to scan. */
	if (perf_data__size(&rec->data) == 0)
		return 0;

	/*
	 * During this process, it'll load kernel map and replace the
	 * dso->long_name to a real pathname it found. In this case
	 * we prefer the vmlinux path like
	 * /lib/modules/3.16.4/build/vmlinux
	 *
	 * rather than build-id path (in debug directory).
	 * $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
	 */
	symbol_conf.ignore_vmlinux_buildid = true;

	/*
	 * If --buildid-all is given, it marks all DSO regardless of hits,
	 * so no need to process samples. But if timestamp_boundary is enabled,
	 * it still needs to walk on all samples to get the timestamps of
	 * first/last samples.
	 */
	if (rec->buildid_all && !rec->timestamp_boundary)
		rec->tool.sample = NULL;

	return perf_session__process_events(session);
}
690
/*
 * machines__process_guests() callback: synthesize module and kernel
 * mmap events for one guest machine so its symbols can be resolved.
 */
static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * As for the guest kernel, when processing the record & report
	 * subcommands we arrange the module mmaps prior to the guest
	 * kernel mmap and trigger a preload dso, because default guest
	 * module symbols are loaded from guest kallsyms instead of
	 * /lib/modules/XXX/XXX.  This avoids missing symbols when the
	 * first address is in a module instead of in the guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
	 * have no _text sometimes.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}
719
/*
 * PERF_RECORD_FINISHED_ROUND marker: a header-only event telling the
 * reader that all events written before it may be safely reordered
 * and flushed.
 */
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};
724
/*
 * With --affinity=node/cpu, migrate the tool thread onto the CPU mask
 * of the mmap being flushed so the copy out of the ring buffer stays
 * local to the buffer's memory.  No-op in the default SYS mode.
 */
static void record__adjust_affinity(struct record *rec, struct perf_mmap *map)
{
	if (rec->opts.affinity != PERF_AFFINITY_SYS &&
	    !CPU_EQUAL(&rec->affinity_mask, &map->affinity_mask)) {
		CPU_ZERO(&rec->affinity_mask);
		CPU_OR(&rec->affinity_mask, &rec->affinity_mask, &map->affinity_mask);
		sched_setaffinity(0, sizeof(rec->affinity_mask), &rec->affinity_mask);
	}
}
734
/*
 * Drain one set of mmaps (regular or overwrite/backward, selected by
 * @overwrite) into the output file, then emit a FINISHED_ROUND event if
 * anything was written.  Returns 0 on success, -1 on push failure.
 *
 * In AIO mode the file offset is tracked manually in @off across the whole
 * pass (record__aio_get_pos/record__aio_set_pos) because writes complete
 * asynchronously.
 */
static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
				    bool overwrite)
{
	u64 bytes_written = rec->bytes_written;
	int i;
	int rc = 0;
	struct perf_mmap *maps;
	int trace_fd = rec->data.file.fd;
	off_t off;

	if (!evlist)
		return 0;

	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
	if (!maps)
		return 0;

	/* Overwrite buffers are only read when data collection is pending. */
	if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
		return 0;

	if (record__aio_enabled(rec))
		off = record__aio_get_pos(trace_fd);

	for (i = 0; i < evlist->nr_mmaps; i++) {
		struct perf_mmap *map = &maps[i];

		if (map->base) {
			record__adjust_affinity(rec, map);
			if (!record__aio_enabled(rec)) {
				if (perf_mmap__push(map, rec, record__pushfn) != 0) {
					rc = -1;
					goto out;
				}
			} else {
				int idx;
				/*
				 * Call record__aio_sync() to wait till map->data buffer
				 * becomes available after previous aio write request.
				 */
				idx = record__aio_sync(map, false);
				if (perf_mmap__aio_push(map, rec, idx, record__aio_pushfn, &off) != 0) {
					/* Persist the offset reached so far before bailing out. */
					record__aio_set_pos(trace_fd, off);
					rc = -1;
					goto out;
				}
			}
		}

		/* AUX area data is read here only outside snapshot mode. */
		if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
		    record__auxtrace_mmap_read(rec, map) != 0) {
			rc = -1;
			goto out;
		}
	}

	if (record__aio_enabled(rec))
		record__aio_set_pos(trace_fd, off);

	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, NULL, &finished_round_event, sizeof(finished_round_event));

	if (overwrite)
		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
out:
	return rc;
}
805
Wang Nancb216862016-06-27 10:24:04 +0000806static int record__mmap_read_all(struct record *rec)
807{
808 int err;
809
Wang Nana4ea0ec2016-07-14 08:34:36 +0000810 err = record__mmap_read_evlist(rec, rec->evlist, false);
Wang Nancb216862016-06-27 10:24:04 +0000811 if (err)
812 return err;
813
Wang Nan057374642016-07-14 08:34:43 +0000814 return record__mmap_read_evlist(rec, rec->evlist, true);
Wang Nancb216862016-06-27 10:24:04 +0000815}
816
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -0300817static void record__init_features(struct record *rec)
David Ahern57706ab2013-11-06 11:41:34 -0700818{
David Ahern57706ab2013-11-06 11:41:34 -0700819 struct perf_session *session = rec->session;
820 int feat;
821
822 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
823 perf_header__set_feat(&session->header, feat);
824
825 if (rec->no_buildid)
826 perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
827
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -0300828 if (!have_tracepoints(&rec->evlist->entries))
David Ahern57706ab2013-11-06 11:41:34 -0700829 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
830
831 if (!rec->opts.branch_stack)
832 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
Adrian Hunteref149c22015-04-09 18:53:45 +0300833
834 if (!rec->opts.full_auxtrace)
835 perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
Jiri Olsaffa517a2015-10-25 15:51:43 +0100836
Alexey Budankovcf790512018-10-09 17:36:24 +0300837 if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
838 perf_header__clear_feat(&session->header, HEADER_CLOCKID);
839
Jiri Olsa258031c2019-03-08 14:47:39 +0100840 perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
841
Jiri Olsaffa517a2015-10-25 15:51:43 +0100842 perf_header__clear_feat(&session->header, HEADER_STAT);
David Ahern57706ab2013-11-06 11:41:34 -0700843}
844
Wang Nane1ab48b2016-02-26 09:32:10 +0000845static void
846record__finish_output(struct record *rec)
847{
Jiri Olsa8ceb41d2017-01-23 22:07:59 +0100848 struct perf_data *data = &rec->data;
849 int fd = perf_data__fd(data);
Wang Nane1ab48b2016-02-26 09:32:10 +0000850
Jiri Olsa8ceb41d2017-01-23 22:07:59 +0100851 if (data->is_pipe)
Wang Nane1ab48b2016-02-26 09:32:10 +0000852 return;
853
854 rec->session->header.data_size += rec->bytes_written;
Jiri Olsa45112e82019-02-21 10:41:29 +0100855 data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR);
Wang Nane1ab48b2016-02-26 09:32:10 +0000856
857 if (!rec->no_buildid) {
858 process_buildids(rec);
859
860 if (rec->buildid_all)
861 dsos__hit_all(rec->session);
862 }
863 perf_session__write_header(rec->session, rec->evlist, fd, true);
864
865 return;
866}
867
Wang Nan4ea648a2016-07-14 08:34:47 +0000868static int record__synthesize_workload(struct record *rec, bool tail)
Wang Nanbe7b0c92016-04-20 18:59:54 +0000869{
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -0300870 int err;
871 struct thread_map *thread_map;
Wang Nanbe7b0c92016-04-20 18:59:54 +0000872
Wang Nan4ea648a2016-07-14 08:34:47 +0000873 if (rec->opts.tail_synthesize != tail)
874 return 0;
875
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -0300876 thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
877 if (thread_map == NULL)
878 return -1;
879
880 err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
Wang Nanbe7b0c92016-04-20 18:59:54 +0000881 process_synthesized_event,
882 &rec->session->machines.host,
Mark Drayton3fcb10e2018-12-04 12:34:20 -0800883 rec->opts.sample_address);
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -0300884 thread_map__put(thread_map);
885 return err;
Wang Nanbe7b0c92016-04-20 18:59:54 +0000886}
887
Wang Nan4ea648a2016-07-14 08:34:47 +0000888static int record__synthesize(struct record *rec, bool tail);
Wang Nan3c1cb7e2016-04-20 18:59:50 +0000889
/*
 * Rotate the output: flush and finalize the current perf.data, rename it
 * with a timestamp, and (unless @at_exit) open a fresh output file and
 * re-emit tracking/synthesized events into it.  Returns the new fd, or a
 * negative errno on failure.  Ordering matters: AIO writes are synced and
 * tail events synthesized before the header is finalized.
 */
static int
record__switch_output(struct record *rec, bool at_exit)
{
	struct perf_data *data = &rec->data;
	int fd, err;

	/* Same Size: "2015122520103046"*/
	char timestamp[] = "InvalidTimestamp";

	/* Make sure all in-flight AIO writes have landed before finalizing. */
	record__aio_mmap_read_sync(rec);

	record__synthesize(rec, true);
	if (target__none(&rec->opts.target))
		record__synthesize_workload(rec, true);

	rec->samples = 0;
	record__finish_output(rec);
	err = fetch_current_timestamp(timestamp, sizeof(timestamp));
	if (err) {
		pr_err("Failed to get current timestamp\n");
		return -EINVAL;
	}

	fd = perf_data__switch(data, timestamp,
				    rec->session->header.data_offset,
				    at_exit);
	if (fd >= 0 && !at_exit) {
		/* Fresh file: restart the data-size accounting. */
		rec->bytes_written = 0;
		rec->session->header.data_size = 0;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
			data->path, timestamp);

	/* Output tracking events */
	if (!at_exit) {
		record__synthesize(rec, false);

		/*
		 * In 'perf record --switch-output' without -a,
		 * record__synthesize() in record__switch_output() won't
		 * generate tracking events because there's no thread_map
		 * in evlist. Which causes newly created perf.data doesn't
		 * contain map and comm information.
		 * Create a fake thread_map and directly call
		 * perf_event__synthesize_thread_map() for those events.
		 */
		if (target__none(&rec->opts.target))
			record__synthesize_workload(rec, false);
	}
	return fd;
}
943
/* errno of a failed workload exec, delivered via SIGUSR1's sigqueue value. */
static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	/* Runs in signal context: only set flags, no I/O or allocation. */
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}
959
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300960static void snapshot_sig_handler(int sig);
Jiri Olsabfacbe32017-01-09 10:52:00 +0100961static void alarm_sig_handler(int sig);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300962
/*
 * Weak default for synthesizing a time-conversion event: does nothing and
 * reports success.  Architectures that support it can provide a strong
 * override elsewhere (hence __weak).
 */
int __weak
perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
			    struct perf_tool *tool __maybe_unused,
			    perf_event__handler_t process __maybe_unused,
			    struct machine *machine __maybe_unused)
{
	return 0;
}
971
Wang Nanee667f92016-06-27 10:24:05 +0000972static const struct perf_event_mmap_page *
973perf_evlist__pick_pc(struct perf_evlist *evlist)
974{
Wang Nanb2cb6152016-07-14 08:34:39 +0000975 if (evlist) {
976 if (evlist->mmap && evlist->mmap[0].base)
977 return evlist->mmap[0].base;
Wang Nan0b72d692017-12-04 16:51:07 +0000978 if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].base)
979 return evlist->overwrite_mmap[0].base;
Wang Nanb2cb6152016-07-14 08:34:39 +0000980 }
Wang Nanee667f92016-06-27 10:24:05 +0000981 return NULL;
982}
983
Wang Nanc45628b2016-05-24 02:28:59 +0000984static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
985{
Wang Nanee667f92016-06-27 10:24:05 +0000986 const struct perf_event_mmap_page *pc;
987
988 pc = perf_evlist__pick_pc(rec->evlist);
989 if (pc)
990 return pc;
Wang Nanc45628b2016-05-24 02:28:59 +0000991 return NULL;
992}
993
Wang Nan4ea648a2016-07-14 08:34:47 +0000994static int record__synthesize(struct record *rec, bool tail)
Wang Nanc45c86e2016-02-26 09:32:07 +0000995{
996 struct perf_session *session = rec->session;
997 struct machine *machine = &session->machines.host;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +0100998 struct perf_data *data = &rec->data;
Wang Nanc45c86e2016-02-26 09:32:07 +0000999 struct record_opts *opts = &rec->opts;
1000 struct perf_tool *tool = &rec->tool;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001001 int fd = perf_data__fd(data);
Wang Nanc45c86e2016-02-26 09:32:07 +00001002 int err = 0;
1003
Wang Nan4ea648a2016-07-14 08:34:47 +00001004 if (rec->opts.tail_synthesize != tail)
1005 return 0;
1006
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001007 if (data->is_pipe) {
Jiri Olsaa2015512018-03-14 10:22:04 +01001008 /*
1009 * We need to synthesize events first, because some
1010 * features works on top of them (on report side).
1011 */
Jiri Olsa318ec182018-08-30 08:32:15 +02001012 err = perf_event__synthesize_attrs(tool, rec->evlist,
Wang Nanc45c86e2016-02-26 09:32:07 +00001013 process_synthesized_event);
1014 if (err < 0) {
1015 pr_err("Couldn't synthesize attrs.\n");
1016 goto out;
1017 }
1018
Jiri Olsaa2015512018-03-14 10:22:04 +01001019 err = perf_event__synthesize_features(tool, session, rec->evlist,
1020 process_synthesized_event);
1021 if (err < 0) {
1022 pr_err("Couldn't synthesize features.\n");
1023 return err;
1024 }
1025
Wang Nanc45c86e2016-02-26 09:32:07 +00001026 if (have_tracepoints(&rec->evlist->entries)) {
1027 /*
1028 * FIXME err <= 0 here actually means that
1029 * there were no tracepoints so its not really
1030 * an error, just that we don't need to
1031 * synthesize anything. We really have to
1032 * return this more properly and also
1033 * propagate errors that now are calling die()
1034 */
1035 err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
1036 process_synthesized_event);
1037 if (err <= 0) {
1038 pr_err("Couldn't record tracing data.\n");
1039 goto out;
1040 }
1041 rec->bytes_written += err;
1042 }
1043 }
1044
Wang Nanc45628b2016-05-24 02:28:59 +00001045 err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
Adrian Hunter46bc29b2016-03-08 10:38:44 +02001046 process_synthesized_event, machine);
1047 if (err)
1048 goto out;
1049
Wang Nanc45c86e2016-02-26 09:32:07 +00001050 if (rec->opts.full_auxtrace) {
1051 err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
1052 session, process_synthesized_event);
1053 if (err)
1054 goto out;
1055 }
1056
Arnaldo Carvalho de Melo6c443952017-11-14 11:03:19 -03001057 if (!perf_evlist__exclude_kernel(rec->evlist)) {
1058 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
1059 machine);
1060 WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
1061 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
1062 "Check /proc/kallsyms permission or run as root.\n");
Wang Nanc45c86e2016-02-26 09:32:07 +00001063
Arnaldo Carvalho de Melo6c443952017-11-14 11:03:19 -03001064 err = perf_event__synthesize_modules(tool, process_synthesized_event,
1065 machine);
1066 WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
1067 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
1068 "Check /proc/modules permission or run as root.\n");
1069 }
Wang Nanc45c86e2016-02-26 09:32:07 +00001070
1071 if (perf_guest) {
1072 machines__process_guests(&session->machines,
1073 perf_event__synthesize_guest_os, tool);
1074 }
1075
Andi Kleenbfd8f722017-11-17 13:42:58 -08001076 err = perf_event__synthesize_extra_attr(&rec->tool,
1077 rec->evlist,
1078 process_synthesized_event,
1079 data->is_pipe);
1080 if (err)
1081 goto out;
1082
Andi Kleen373565d2017-11-17 13:42:59 -08001083 err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->threads,
1084 process_synthesized_event,
1085 NULL);
1086 if (err < 0) {
1087 pr_err("Couldn't synthesize thread map.\n");
1088 return err;
1089 }
1090
1091 err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->cpus,
1092 process_synthesized_event, NULL);
1093 if (err < 0) {
1094 pr_err("Couldn't synthesize cpu map.\n");
1095 return err;
1096 }
1097
Song Liu7b612e22019-01-17 08:15:19 -08001098 err = perf_event__synthesize_bpf_events(tool, process_synthesized_event,
1099 machine, opts);
1100 if (err < 0)
1101 pr_warning("Couldn't synthesize bpf events.\n");
1102
Wang Nanc45c86e2016-02-26 09:32:07 +00001103 err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
1104 process_synthesized_event, opts->sample_address,
Mark Drayton3fcb10e2018-12-04 12:34:20 -08001105 1);
Wang Nanc45c86e2016-02-26 09:32:07 +00001106out:
1107 return err;
1108}
1109
/*
 * Main body of 'perf record': set up signal handlers and the session, fork
 * the workload (if argv was given), open and configure the events, write
 * the header, synthesize startup events, then loop reading the mmaps until
 * done/draining, handling auxtrace snapshots and --switch-output rotation
 * along the way.  On exit it reaps the child, synthesizes tail events and
 * finalizes (or rotates) the output file.
 *
 * Returns 0, the workload's exit status, or a negative error.
 */
static int __cmd_record(struct record *rec, int argc, const char **argv)
{
	int err;
	int status = 0;
	unsigned long waking = 0;
	const bool forks = argc > 0;
	struct perf_tool *tool = &rec->tool;
	struct record_opts *opts = &rec->opts;
	struct perf_data *data = &rec->data;
	struct perf_session *session;
	bool disabled = false, draining = false;
	int fd;

	/* Phase 1: signal handling and session creation. */
	atexit(record__sig_exit);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGTERM, sig_handler);
	signal(SIGSEGV, sigsegv_handler);

	if (rec->opts.record_namespaces)
		tool->namespace_events = true;

	/* SIGUSR2 drives auxtrace snapshots and output switching; ignored otherwise. */
	if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
		signal(SIGUSR2, snapshot_sig_handler);
		if (rec->opts.auxtrace_snapshot_mode)
			trigger_on(&auxtrace_snapshot_trigger);
		if (rec->switch_output.enabled)
			trigger_on(&switch_output_trigger);
	} else {
		signal(SIGUSR2, SIG_IGN);
	}

	session = perf_session__new(data, false, tool);
	if (session == NULL) {
		pr_err("Perf session creation failed.\n");
		return -1;
	}

	fd = perf_data__fd(data);
	rec->session = session;

	record__init_features(rec);

	if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
		session->header.env.clockid_res_ns = rec->opts.clockid_res_ns;

	/* Phase 2: prepare (but don't start) the workload, open events. */
	if (forks) {
		err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
						    argv, data->is_pipe,
						    workload_exec_failed_signal);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			status = err;
			goto out_delete_session;
		}
	}

	/*
	 * If we have just single event and are sending data
	 * through pipe, we need to force the ids allocation,
	 * because we synthesize event name through the pipe
	 * and need the id for that.
	 */
	if (data->is_pipe && rec->evlist->nr_entries == 1)
		rec->opts.sample_id = true;

	if (record__open(rec) != 0) {
		err = -1;
		goto out_child;
	}

	err = bpf__apply_obj_config();
	if (err) {
		char errbuf[BUFSIZ];

		bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
		pr_err("ERROR: Apply config to BPF failed: %s\n",
			 errbuf);
		goto out_child;
	}

	/*
	 * Normally perf_session__new would do this, but it doesn't have the
	 * evlist.
	 */
	if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
		pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
		rec->tool.ordered_events = false;
	}

	if (!rec->evlist->nr_groups)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

	/* Phase 3: write the (preliminary) header and synthesize startup events. */
	if (data->is_pipe) {
		err = perf_header__write_pipe(fd);
		if (err < 0)
			goto out_child;
	} else {
		err = perf_session__write_header(session, rec->evlist, fd, false);
		if (err < 0)
			goto out_child;
	}

	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		err = -1;
		goto out_child;
	}

	err = record__synthesize(rec, false);
	if (err < 0)
		goto out_child;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_child;
		}
	}

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!target__none(&opts->target) && !opts->initial_delay)
		perf_evlist__enable(rec->evlist);

	/*
	 * Let the child rip
	 */
	if (forks) {
		struct machine *machine = &session->machines.host;
		union perf_event *event;
		pid_t tgid;

		event = malloc(sizeof(event->comm) + machine->id_hdr_size);
		if (event == NULL) {
			err = -ENOMEM;
			goto out_child;
		}

		/*
		 * Some H/W events are generated before COMM event
		 * which is emitted during exec(), so perf script
		 * cannot see a correct process name for those events.
		 * Synthesize COMM event to prevent it.
		 */
		tgid = perf_event__synthesize_comm(tool, event,
						   rec->evlist->workload.pid,
						   process_synthesized_event,
						   machine);
		free(event);

		if (tgid == -1)
			goto out_child;

		event = malloc(sizeof(event->namespaces) +
			       (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
			       machine->id_hdr_size);
		if (event == NULL) {
			err = -ENOMEM;
			goto out_child;
		}

		/*
		 * Synthesize NAMESPACES event for the command specified.
		 */
		perf_event__synthesize_namespaces(tool, event,
						  rec->evlist->workload.pid,
						  tgid, process_synthesized_event,
						  machine);
		free(event);

		perf_evlist__start_workload(rec->evlist);
	}

	if (opts->initial_delay) {
		usleep(opts->initial_delay * USEC_PER_MSEC);
		perf_evlist__enable(rec->evlist);
	}

	/* Phase 4: the main read loop. */
	trigger_ready(&auxtrace_snapshot_trigger);
	trigger_ready(&switch_output_trigger);
	perf_hooks__invoke_record_start();
	for (;;) {
		unsigned long long hits = rec->samples;

		/*
		 * rec->evlist->bkw_mmap_state is possible to be
		 * BKW_MMAP_EMPTY here: when done == true and
		 * hits != rec->samples in previous round.
		 *
		 * perf_evlist__toggle_bkw_mmap ensure we never
		 * convert BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
		 */
		if (trigger_is_hit(&switch_output_trigger) || done || draining)
			perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);

		if (record__mmap_read_all(rec) < 0) {
			trigger_error(&auxtrace_snapshot_trigger);
			trigger_error(&switch_output_trigger);
			err = -1;
			goto out_child;
		}

		if (auxtrace_record__snapshot_started) {
			auxtrace_record__snapshot_started = 0;
			if (!trigger_is_error(&auxtrace_snapshot_trigger))
				record__read_auxtrace_snapshot(rec);
			if (trigger_is_error(&auxtrace_snapshot_trigger)) {
				pr_err("AUX area tracing snapshot failed\n");
				err = -1;
				goto out_child;
			}
		}

		if (trigger_is_hit(&switch_output_trigger)) {
			/*
			 * If switch_output_trigger is hit, the data in
			 * overwritable ring buffer should have been collected,
			 * so bkw_mmap_state should be set to BKW_MMAP_EMPTY.
			 *
			 * If SIGUSR2 raise after or during record__mmap_read_all(),
			 * record__mmap_read_all() didn't collect data from
			 * overwritable ring buffer. Read again.
			 */
			if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
				continue;
			trigger_ready(&switch_output_trigger);

			/*
			 * Reenable events in overwrite ring buffer after
			 * record__mmap_read_all(): we should have collected
			 * data from it.
			 */
			perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);

			if (!quiet)
				fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
					waking);
			waking = 0;
			fd = record__switch_output(rec, false);
			if (fd < 0) {
				pr_err("Failed to switch to new file\n");
				trigger_error(&switch_output_trigger);
				err = fd;
				goto out_child;
			}

			/* re-arm the alarm */
			if (rec->switch_output.time)
				alarm(rec->switch_output.time);
		}

		/* No new samples this round: block in poll unless finishing. */
		if (hits == rec->samples) {
			if (done || draining)
				break;
			err = perf_evlist__poll(rec->evlist, -1);
			/*
			 * Propagate error, only if there's any. Ignore positive
			 * number of returned events and interrupt error.
			 */
			if (err > 0 || (err < 0 && errno == EINTR))
				err = 0;
			waking++;

			if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
				draining = true;
		}

		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
		if (done && !disabled && !target__none(&opts->target)) {
			trigger_off(&auxtrace_snapshot_trigger);
			perf_evlist__disable(rec->evlist);
			disabled = true;
		}
	}
	trigger_off(&auxtrace_snapshot_trigger);
	trigger_off(&switch_output_trigger);

	if (forks && workload_exec_errno) {
		char msg[STRERR_BUFSIZE];
		const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
		pr_err("Workload failed: %s\n", emsg);
		err = -1;
		goto out_child;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

	if (target__none(&rec->opts.target))
		record__synthesize_workload(rec, true);

	/* Phase 5: teardown — reap the child, tail synthesis, finalize output. */
out_child:
	record__aio_mmap_read_sync(rec);

	if (forks) {
		int exit_status;

		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&exit_status);

		if (err < 0)
			status = err;
		else if (WIFEXITED(exit_status))
			status = WEXITSTATUS(exit_status);
		else if (WIFSIGNALED(exit_status))
			signr = WTERMSIG(exit_status);
	} else
		status = err;

	record__synthesize(rec, true);
	/* this will be recalculated during process_buildids() */
	rec->samples = 0;

	if (!err) {
		if (!rec->timestamp_filename) {
			record__finish_output(rec);
		} else {
			fd = record__switch_output(rec, true);
			if (fd < 0) {
				status = fd;
				goto out_delete_session;
			}
		}
	}

	perf_hooks__invoke_record_end();

	if (!err && !quiet) {
		char samples[128];
		const char *postfix = rec->timestamp_filename ?
					".<timestamp>" : "";

		if (rec->samples && !rec->opts.full_auxtrace)
			scnprintf(samples, sizeof(samples),
				  " (%" PRIu64 " samples)", rec->samples);
		else
			samples[0] = '\0';

		fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s ]\n",
			perf_data__size(data) / 1024.0 / 1024.0,
			data->path, postfix, samples);
	}

out_delete_session:
	perf_session__delete(session);
	return status;
}
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001473
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001474static void callchain_debug(struct callchain_param *callchain)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001475{
Kan Liangaad2b212015-01-05 13:23:04 -05001476 static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
Jiri Olsaa601fdf2014-02-03 12:44:43 +01001477
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001478 pr_debug("callchain: type %s\n", str[callchain->record_mode]);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001479
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001480 if (callchain->record_mode == CALLCHAIN_DWARF)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001481 pr_debug("callchain: stack dump size %d\n",
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001482 callchain->dump_size);
1483}
1484
1485int record_opts__parse_callchain(struct record_opts *record,
1486 struct callchain_param *callchain,
1487 const char *arg, bool unset)
1488{
1489 int ret;
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001490 callchain->enabled = !unset;
1491
1492 /* --no-call-graph */
1493 if (unset) {
1494 callchain->record_mode = CALLCHAIN_NONE;
1495 pr_debug("callchain: disabled\n");
1496 return 0;
1497 }
1498
1499 ret = parse_callchain_record_opt(arg, callchain);
1500 if (!ret) {
1501 /* Enable data address sampling for DWARF unwind. */
1502 if (callchain->record_mode == CALLCHAIN_DWARF)
1503 record->sample_address = true;
1504 callchain_debug(callchain);
1505 }
1506
1507 return ret;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001508}
1509
Kan Liangc421e802015-07-29 05:42:12 -04001510int record_parse_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001511 const char *arg,
1512 int unset)
1513{
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001514 return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
Jiri Olsa26d33022012-08-07 15:20:47 +02001515}
1516
Kan Liangc421e802015-07-29 05:42:12 -04001517int record_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001518 const char *arg __maybe_unused,
1519 int unset __maybe_unused)
1520{
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001521 struct callchain_param *callchain = opt->value;
Kan Liangc421e802015-07-29 05:42:12 -04001522
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001523 callchain->enabled = true;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001524
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001525 if (callchain->record_mode == CALLCHAIN_NONE)
1526 callchain->record_mode = CALLCHAIN_FP;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001527
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001528 callchain_debug(callchain);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001529 return 0;
1530}
1531
Jiri Olsaeb853e82014-02-03 12:44:42 +01001532static int perf_record_config(const char *var, const char *value, void *cb)
1533{
Namhyung Kim7a29c082015-12-15 10:49:56 +09001534 struct record *rec = cb;
1535
1536 if (!strcmp(var, "record.build-id")) {
1537 if (!strcmp(value, "cache"))
1538 rec->no_buildid_cache = false;
1539 else if (!strcmp(value, "no-cache"))
1540 rec->no_buildid_cache = true;
1541 else if (!strcmp(value, "skip"))
1542 rec->no_buildid = true;
1543 else
1544 return -1;
1545 return 0;
1546 }
Yisheng Xiecff17202018-03-12 19:25:57 +08001547 if (!strcmp(var, "record.call-graph")) {
1548 var = "call-graph.record-mode";
1549 return perf_default_config(var, value, cb);
1550 }
Alexey Budankov93f20c02018-11-06 12:07:19 +03001551#ifdef HAVE_AIO_SUPPORT
1552 if (!strcmp(var, "record.aio")) {
1553 rec->opts.nr_cblocks = strtol(value, NULL, 0);
1554 if (!rec->opts.nr_cblocks)
1555 rec->opts.nr_cblocks = nr_cblocks_default;
1556 }
1557#endif
Jiri Olsaeb853e82014-02-03 12:44:42 +01001558
Yisheng Xiecff17202018-03-12 19:25:57 +08001559 return 0;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001560}
1561
/* Maps a user-supplied clock name to its clockid value for --clockid. */
struct clockid_map {
	const char *name;
	int clockid;
};

#define CLOCKID_MAP(n, c)	\
	{ .name = n, .clockid = (c), }

/* Terminator entry for the clockids[] table below. */
#define CLOCKID_END	{ .name = NULL, }


/*
 * Add the missing ones, we need to build on many distros...
 */
#ifndef CLOCK_MONOTONIC_RAW
#define CLOCK_MONOTONIC_RAW 4
#endif
#ifndef CLOCK_BOOTTIME
#define CLOCK_BOOTTIME 7
#endif
#ifndef CLOCK_TAI
#define CLOCK_TAI 11
#endif

/* Clock names accepted by parse_clockid(), matched case-insensitively. */
static const struct clockid_map clockids[] = {
	/* available for all events, NMI safe */
	CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
	CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),

	/* available for some events */
	CLOCKID_MAP("realtime", CLOCK_REALTIME),
	CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
	CLOCKID_MAP("tai", CLOCK_TAI),

	/* available for the lazy */
	CLOCKID_MAP("mono", CLOCK_MONOTONIC),
	CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
	CLOCKID_MAP("real", CLOCK_REALTIME),
	CLOCKID_MAP("boot", CLOCK_BOOTTIME),

	CLOCKID_END,
};
1604
Alexey Budankovcf790512018-10-09 17:36:24 +03001605static int get_clockid_res(clockid_t clk_id, u64 *res_ns)
1606{
1607 struct timespec res;
1608
1609 *res_ns = 0;
1610 if (!clock_getres(clk_id, &res))
1611 *res_ns = res.tv_nsec + res.tv_sec * NSEC_PER_SEC;
1612 else
1613 pr_warning("WARNING: Failed to determine specified clock resolution.\n");
1614
1615 return 0;
1616}
1617
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001618static int parse_clockid(const struct option *opt, const char *str, int unset)
1619{
1620 struct record_opts *opts = (struct record_opts *)opt->value;
1621 const struct clockid_map *cm;
1622 const char *ostr = str;
1623
1624 if (unset) {
1625 opts->use_clockid = 0;
1626 return 0;
1627 }
1628
1629 /* no arg passed */
1630 if (!str)
1631 return 0;
1632
1633 /* no setting it twice */
1634 if (opts->use_clockid)
1635 return -1;
1636
1637 opts->use_clockid = true;
1638
1639 /* if its a number, we're done */
1640 if (sscanf(str, "%d", &opts->clockid) == 1)
Alexey Budankovcf790512018-10-09 17:36:24 +03001641 return get_clockid_res(opts->clockid, &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001642
1643 /* allow a "CLOCK_" prefix to the name */
1644 if (!strncasecmp(str, "CLOCK_", 6))
1645 str += 6;
1646
1647 for (cm = clockids; cm->name; cm++) {
1648 if (!strcasecmp(str, cm->name)) {
1649 opts->clockid = cm->clockid;
Alexey Budankovcf790512018-10-09 17:36:24 +03001650 return get_clockid_res(opts->clockid,
1651 &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001652 }
1653 }
1654
1655 opts->use_clockid = false;
1656 ui__warning("unknown clockid %s, check man page\n", ostr);
1657 return -1;
1658}
1659
Alexey Budankovf4fe11b2019-01-22 20:52:03 +03001660static int record__parse_affinity(const struct option *opt, const char *str, int unset)
1661{
1662 struct record_opts *opts = (struct record_opts *)opt->value;
1663
1664 if (unset || !str)
1665 return 0;
1666
1667 if (!strcasecmp(str, "node"))
1668 opts->affinity = PERF_AFFINITY_NODE;
1669 else if (!strcasecmp(str, "cpu"))
1670 opts->affinity = PERF_AFFINITY_CPU;
1671
1672 return 0;
1673}
1674
Adrian Huntere9db1312015-04-09 18:53:46 +03001675static int record__parse_mmap_pages(const struct option *opt,
1676 const char *str,
1677 int unset __maybe_unused)
1678{
1679 struct record_opts *opts = opt->value;
1680 char *s, *p;
1681 unsigned int mmap_pages;
1682 int ret;
1683
1684 if (!str)
1685 return -EINVAL;
1686
1687 s = strdup(str);
1688 if (!s)
1689 return -ENOMEM;
1690
1691 p = strchr(s, ',');
1692 if (p)
1693 *p = '\0';
1694
1695 if (*s) {
1696 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
1697 if (ret)
1698 goto out_free;
1699 opts->mmap_pages = mmap_pages;
1700 }
1701
1702 if (!p) {
1703 ret = 0;
1704 goto out_free;
1705 }
1706
1707 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
1708 if (ret)
1709 goto out_free;
1710
1711 opts->auxtrace_mmap_pages = mmap_pages;
1712
1713out_free:
1714 free(s);
1715 return ret;
1716}
1717
Jiri Olsa0c582442017-01-09 10:51:59 +01001718static void switch_output_size_warn(struct record *rec)
1719{
1720 u64 wakeup_size = perf_evlist__mmap_size(rec->opts.mmap_pages);
1721 struct switch_output *s = &rec->switch_output;
1722
1723 wakeup_size /= 2;
1724
1725 if (s->size < wakeup_size) {
1726 char buf[100];
1727
1728 unit_number__scnprintf(buf, sizeof(buf), wakeup_size);
1729 pr_warning("WARNING: switch-output data size lower than "
1730 "wakeup kernel buffer size (%s) "
1731 "expect bigger perf.data sizes\n", buf);
1732 }
1733}
1734
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001735static int switch_output_setup(struct record *rec)
1736{
1737 struct switch_output *s = &rec->switch_output;
Jiri Olsadc0c6122017-01-09 10:51:58 +01001738 static struct parse_tag tags_size[] = {
1739 { .tag = 'B', .mult = 1 },
1740 { .tag = 'K', .mult = 1 << 10 },
1741 { .tag = 'M', .mult = 1 << 20 },
1742 { .tag = 'G', .mult = 1 << 30 },
1743 { .tag = 0 },
1744 };
Jiri Olsabfacbe32017-01-09 10:52:00 +01001745 static struct parse_tag tags_time[] = {
1746 { .tag = 's', .mult = 1 },
1747 { .tag = 'm', .mult = 60 },
1748 { .tag = 'h', .mult = 60*60 },
1749 { .tag = 'd', .mult = 60*60*24 },
1750 { .tag = 0 },
1751 };
Jiri Olsadc0c6122017-01-09 10:51:58 +01001752 unsigned long val;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001753
1754 if (!s->set)
1755 return 0;
1756
1757 if (!strcmp(s->str, "signal")) {
1758 s->signal = true;
1759 pr_debug("switch-output with SIGUSR2 signal\n");
Jiri Olsadc0c6122017-01-09 10:51:58 +01001760 goto enabled;
1761 }
1762
1763 val = parse_tag_value(s->str, tags_size);
1764 if (val != (unsigned long) -1) {
1765 s->size = val;
1766 pr_debug("switch-output with %s size threshold\n", s->str);
1767 goto enabled;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001768 }
1769
Jiri Olsabfacbe32017-01-09 10:52:00 +01001770 val = parse_tag_value(s->str, tags_time);
1771 if (val != (unsigned long) -1) {
1772 s->time = val;
1773 pr_debug("switch-output with %s time threshold (%lu seconds)\n",
1774 s->str, s->time);
1775 goto enabled;
1776 }
1777
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001778 return -1;
Jiri Olsadc0c6122017-01-09 10:51:58 +01001779
1780enabled:
1781 rec->timestamp_filename = true;
1782 s->enabled = true;
Jiri Olsa0c582442017-01-09 10:51:59 +01001783
1784 if (s->size && !rec->opts.no_buffering)
1785 switch_output_size_warn(rec);
1786
Jiri Olsadc0c6122017-01-09 10:51:58 +01001787 return 0;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001788}
1789
/* Synopsis lines shown by "perf record -h" and on usage errors. */
static const char * const __record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};
/* Exported (non-static) so builtin-script.c can reuse the usage text. */
const char * const *record_usage = __record_usage;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001796
/*
 * XXX Ideally would be local to cmd_record() and passed to a record__new
 * because we need to have access to it in record__exit, that is called
 * after cmd_record() exits, but since record_options need to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't touch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct record record = {
	.opts = {
		/*
		 * NOTE(review): UINT_MAX/ULLONG_MAX appear to be "not set by
		 * the user" sentinels overridden by -F/-m/-c — confirm in the
		 * record_opts consumers.
		 */
		.sample_time	     = true,
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
			.default_per_cpu = true,
		},
	},
	/* Event-processing callbacks used when this session's data is read. */
	.tool = {
		.sample		= process_sample_event,
		.fork		= perf_event__process_fork,
		.exit		= perf_event__process_exit,
		.comm		= perf_event__process_comm,
		.namespaces	= perf_event__process_namespaces,
		.mmap		= perf_event__process_mmap,
		.mmap2		= perf_event__process_mmap2,
		.ordered_events	= true,
	},
};
Frederic Weisbecker7865e812010-04-14 19:42:07 +02001830
/* --call-graph help string; CALLCHAIN_RECORD_HELP lists the record modes. */
const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
	"\n\t\t\t\tDefault: fp";

/* Set by --dry-run: parse the options, then exit without recording. */
static bool dry_run;
1835
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001836/*
1837 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
1838 * with it and switch to use the library functions in perf_evlist that came
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03001839 * from builtin-record.c, i.e. use record_opts,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001840 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
1841 * using pipes, etc.
1842 */
/*
 * Full option table for 'perf record'.  Most entries write straight into
 * the global 'record' above; callback-based entries are implemented
 * earlier in this file.
 */
static struct option __record_options[] = {
	/* Event selection and filtering. */
	OPT_CALLBACK('e', "event", &record.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
		     "event filter", parse_filter),
	OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
			   NULL, "don't record events from perf itself",
			   exclude_perf),
	/* Target selection: what to profile. */
	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
		    "record events on existing process id"),
	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
		    "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
		    "collect data without buffering"),
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
			    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.data.path, "file",
		    "output file name"),
	OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
			&record.opts.no_inherit_set,
			"child tasks do not inherit counters"),
	OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
		    "synthesize non-sample events at the end of output"),
	OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
	OPT_BOOLEAN(0, "bpf-event", &record.opts.bpf_event, "record bpf events"),
	/* Sampling rate and mmap sizing. */
	OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
		    "Fail if the specified frequency can't be used"),
	OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
		     "profile at this frequency",
		     record__parse_freq),
	OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
		     "number of mmap data pages and AUX area tracing mmap pages",
		     record__parse_mmap_pages),
	OPT_BOOLEAN(0, "group", &record.opts.group,
		    "put the counters into a counter group"),
	/* Callchain recording (see record_callchain_opt() and friends). */
	OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
			   NULL, "enables call-graph recording" ,
			   &record_callchain_opt),
	OPT_CALLBACK(0, "call-graph", &record.opts,
		     "record_mode[,record_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
		    "per thread counts"),
	/* Per-sample payload selection. */
	OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
	OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
		    "Record the sample physical addresses"),
	OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
	OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
			&record.opts.sample_time_set,
			"Record the sample timestamps"),
	OPT_BOOLEAN_SET('P', "period", &record.opts.period, &record.opts.period_set,
			"Record the sample period"),
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
		    "don't sample"),
	OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
			&record.no_buildid_cache_set,
			"do not update the buildid cache"),
	OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
			&record.no_buildid_set,
			"do not collect buildids in perf.data"),
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
		  "ms to wait before starting measurement after program start"),
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
		   "user to profile"),

	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
		     "branch any", "sample any taken branches",
		     parse_branch_stack),

	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
		    "sample by weight (on special events only)"),
	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
		    "sample transaction flags (special events only)"),
	OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
		    "use per-thread mmaps"),
	OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
		    "sample selected machine registers on interrupt,"
		    " use -I ? to list register names", parse_regs),
	OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
		    "sample selected machine registers on interrupt,"
		    " use -I ? to list register names", parse_regs),
	OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
		    "Record running/enabled time of read (:S) events"),
	OPT_CALLBACK('k', "clockid", &record.opts,
	"clockid", "clockid to use for events, see clock_gettime()",
	parse_clockid),
	OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
			  "opts", "AUX area tracing Snapshot Mode", ""),
	OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
			"per thread proc mmap processing timeout in ms"),
	OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
		    "Record namespaces events"),
	OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
		    "Record context switch events"),
	OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
			 "Configure all used events to run in kernel space.",
			 PARSE_OPT_EXCLUSIVE),
	OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
			 "Configure all used events to run in user space.",
			 PARSE_OPT_EXCLUSIVE),
	/* BPF scriptlet compilation knobs. */
	OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
		   "clang binary to use for compiling BPF scriptlets"),
	OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
		   "options passed to clang when compiling BPF scriptlets"),
	OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
		    "Record build-id of all DSOs regardless of hits"),
	OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
		    "append timestamp to output filename"),
	OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
		    "Record timestamp boundary (time of first/last samples)"),
	/* Output rotation; parsed by switch_output_setup(). */
	OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
			  &record.switch_output.set, "signal,size,time",
			  "Switch output when receive SIGUSR2 or cross size,time threshold",
			  "signal"),
	OPT_BOOLEAN(0, "dry-run", &dry_run,
		    "Parse options then exit"),
#ifdef HAVE_AIO_SUPPORT
	OPT_CALLBACK_OPTARG(0, "aio", &record.opts,
		     &nr_cblocks_default, "n", "Use <n> control blocks in asynchronous trace writing mode (default: 1, max: 4)",
		     record__aio_parse),
#endif
	OPT_CALLBACK(0, "affinity", &record.opts, "node|cpu",
		     "Set affinity mask of trace reading thread to NUMA node cpu mask or cpu of processed mmap buffer",
		     record__parse_affinity),
	OPT_END()
};

/* Exported (non-static) so builtin-script.c can reuse the option table. */
struct option *record_options = __record_options;
1990
Arnaldo Carvalho de Melob0ad8ea2017-03-27 11:47:20 -03001991int cmd_record(int argc, const char **argv)
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001992{
Adrian Hunteref149c22015-04-09 18:53:45 +03001993 int err;
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001994 struct record *rec = &record;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001995 char errbuf[BUFSIZ];
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001996
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03001997 setlocale(LC_ALL, "");
1998
Wang Nan48e1cab2015-12-14 10:39:22 +00001999#ifndef HAVE_LIBBPF_SUPPORT
2000# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
2001 set_nobuild('\0', "clang-path", true);
2002 set_nobuild('\0', "clang-opt", true);
2003# undef set_nobuild
2004#endif
2005
He Kuang7efe0e02015-12-14 10:39:23 +00002006#ifndef HAVE_BPF_PROLOGUE
2007# if !defined (HAVE_DWARF_SUPPORT)
2008# define REASON "NO_DWARF=1"
2009# elif !defined (HAVE_LIBBPF_SUPPORT)
2010# define REASON "NO_LIBBPF=1"
2011# else
2012# define REASON "this architecture doesn't support BPF prologue"
2013# endif
2014# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
2015 set_nobuild('\0', "vmlinux", true);
2016# undef set_nobuild
2017# undef REASON
2018#endif
2019
Alexey Budankov9d2ed642019-01-22 20:47:43 +03002020 CPU_ZERO(&rec->affinity_mask);
2021 rec->opts.affinity = PERF_AFFINITY_SYS;
2022
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03002023 rec->evlist = perf_evlist__new();
2024 if (rec->evlist == NULL)
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -02002025 return -ENOMEM;
2026
Arnaldo Carvalho de Meloecc4c562017-01-24 13:44:10 -03002027 err = perf_config(perf_record_config, rec);
2028 if (err)
2029 return err;
Jiri Olsaeb853e82014-02-03 12:44:42 +01002030
Tom Zanussibca647a2010-11-10 08:11:30 -06002031 argc = parse_options(argc, argv, record_options, record_usage,
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02002032 PARSE_OPT_STOP_AT_NON_OPTION);
Namhyung Kim68ba3232017-02-17 17:17:42 +09002033 if (quiet)
2034 perf_quiet_option();
Jiri Olsa483635a2017-02-17 18:00:18 +01002035
2036 /* Make system wide (-a) the default target. */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002037 if (!argc && target__none(&rec->opts.target))
Jiri Olsa483635a2017-02-17 18:00:18 +01002038 rec->opts.target.system_wide = true;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002039
Namhyung Kimbea03402012-04-26 14:15:15 +09002040 if (nr_cgroups && !rec->opts.target.system_wide) {
Namhyung Kimc7118362015-10-25 00:49:27 +09002041 usage_with_options_msg(record_usage, record_options,
2042 "cgroup monitoring only available in system-wide mode");
2043
Stephane Eranian023695d2011-02-14 11:20:01 +02002044 }
Adrian Hunterb757bb02015-07-21 12:44:04 +03002045 if (rec->opts.record_switch_events &&
2046 !perf_can_record_switch_events()) {
Namhyung Kimc7118362015-10-25 00:49:27 +09002047 ui__error("kernel does not support recording context switch events\n");
2048 parse_options_usage(record_usage, record_options, "switch-events", 0);
2049 return -EINVAL;
Adrian Hunterb757bb02015-07-21 12:44:04 +03002050 }
Stephane Eranian023695d2011-02-14 11:20:01 +02002051
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002052 if (switch_output_setup(rec)) {
2053 parse_options_usage(record_usage, record_options, "switch-output", 0);
2054 return -EINVAL;
2055 }
2056
Jiri Olsabfacbe32017-01-09 10:52:00 +01002057 if (rec->switch_output.time) {
2058 signal(SIGALRM, alarm_sig_handler);
2059 alarm(rec->switch_output.time);
2060 }
2061
Adrian Hunter1b36c032016-09-23 17:38:39 +03002062 /*
2063 * Allow aliases to facilitate the lookup of symbols for address
2064 * filters. Refer to auxtrace_parse_filters().
2065 */
2066 symbol_conf.allow_aliases = true;
2067
2068 symbol__init(NULL);
2069
Adrian Hunter4b5ea3b2018-03-06 11:13:12 +02002070 err = record__auxtrace_init(rec);
Adrian Hunter1b36c032016-09-23 17:38:39 +03002071 if (err)
2072 goto out;
2073
Wang Nan0aab2132016-06-16 08:02:41 +00002074 if (dry_run)
Adrian Hunter5c01ad602016-09-23 17:38:37 +03002075 goto out;
Wang Nan0aab2132016-06-16 08:02:41 +00002076
Wang Nand7888572016-04-08 15:07:24 +00002077 err = bpf__setup_stdout(rec->evlist);
2078 if (err) {
2079 bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
2080 pr_err("ERROR: Setup BPF stdout failed: %s\n",
2081 errbuf);
Adrian Hunter5c01ad602016-09-23 17:38:37 +03002082 goto out;
Wang Nand7888572016-04-08 15:07:24 +00002083 }
2084
Adrian Hunteref149c22015-04-09 18:53:45 +03002085 err = -ENOMEM;
2086
Arnaldo Carvalho de Melo6c443952017-11-14 11:03:19 -03002087 if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(rec->evlist))
Arnaldo Carvalho de Melo646aaea2011-05-27 11:00:41 -03002088 pr_warning(
2089"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
2090"check /proc/sys/kernel/kptr_restrict.\n\n"
2091"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
2092"file is not found in the buildid cache or in the vmlinux path.\n\n"
2093"Samples in kernel modules won't be resolved at all.\n\n"
2094"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
2095"even with a suitable vmlinux or kallsyms file.\n\n");
Arnaldo Carvalho de Meloec80fde2011-05-26 09:53:51 -03002096
Wang Nan0c1d46a2016-04-20 18:59:52 +00002097 if (rec->no_buildid_cache || rec->no_buildid) {
Stephane Eraniana1ac1d32010-06-17 11:39:01 +02002098 disable_buildid_cache();
Jiri Olsadc0c6122017-01-09 10:51:58 +01002099 } else if (rec->switch_output.enabled) {
Wang Nan0c1d46a2016-04-20 18:59:52 +00002100 /*
2101 * In 'perf record --switch-output', disable buildid
2102 * generation by default to reduce data file switching
2103 * overhead. Still generate buildid if they are required
2104 * explicitly using
2105 *
Jiri Olsa60437ac2017-01-03 09:19:56 +01002106 * perf record --switch-output --no-no-buildid \
Wang Nan0c1d46a2016-04-20 18:59:52 +00002107 * --no-no-buildid-cache
2108 *
2109 * Following code equals to:
2110 *
2111 * if ((rec->no_buildid || !rec->no_buildid_set) &&
2112 * (rec->no_buildid_cache || !rec->no_buildid_cache_set))
2113 * disable_buildid_cache();
2114 */
2115 bool disable = true;
2116
2117 if (rec->no_buildid_set && !rec->no_buildid)
2118 disable = false;
2119 if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
2120 disable = false;
2121 if (disable) {
2122 rec->no_buildid = true;
2123 rec->no_buildid_cache = true;
2124 disable_buildid_cache();
2125 }
2126 }
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02002127
Wang Nan4ea648a2016-07-14 08:34:47 +00002128 if (record.opts.overwrite)
2129 record.opts.tail_synthesize = true;
2130
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03002131 if (rec->evlist->nr_entries == 0 &&
Arnaldo Carvalho de Melo4b4cd502017-07-03 13:26:32 -03002132 __perf_evlist__add_default(rec->evlist, !record.opts.no_samples) < 0) {
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02002133 pr_err("Not enough memory for event selector list\n");
Adrian Hunter394c01e2016-09-23 17:38:36 +03002134 goto out;
Peter Zijlstrabbd36e52009-06-11 23:11:50 +02002135 }
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002136
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02002137 if (rec->opts.target.tid && !rec->opts.no_inherit_set)
2138 rec->opts.no_inherit = true;
2139
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002140 err = target__validate(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002141 if (err) {
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002142 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Jiri Olsac3dec272018-02-06 19:17:58 +01002143 ui__warning("%s\n", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002144 }
Namhyung Kim4bd0f2d2012-04-26 14:15:18 +09002145
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002146 err = target__parse_uid(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002147 if (err) {
2148 int saved_errno = errno;
2149
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002150 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Namhyung Kim3780f482012-05-29 13:22:57 +09002151 ui__error("%s", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002152
2153 err = -saved_errno;
Adrian Hunter394c01e2016-09-23 17:38:36 +03002154 goto out;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002155 }
Arnaldo Carvalho de Melo0d37aa32012-01-19 14:08:15 -02002156
Mengting Zhangca800062017-12-13 15:01:53 +08002157 /* Enable ignoring missing threads when -u/-p option is defined. */
2158 rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;
Jiri Olsa23dc4f12016-12-12 11:35:43 +01002159
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002160 err = -ENOMEM;
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03002161 if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
Arnaldo Carvalho de Melodd7927f2011-01-12 14:28:51 -02002162 usage_with_options(record_usage, record_options);
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02002163
Adrian Hunteref149c22015-04-09 18:53:45 +03002164 err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
2165 if (err)
Adrian Hunter394c01e2016-09-23 17:38:36 +03002166 goto out;
Adrian Hunteref149c22015-04-09 18:53:45 +03002167
Namhyung Kim61566812016-01-11 22:37:09 +09002168 /*
2169 * We take all buildids when the file contains
2170 * AUX area tracing data because we do not decode the
2171 * trace because it would take too long.
2172 */
2173 if (rec->opts.full_auxtrace)
2174 rec->buildid_all = true;
2175
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03002176 if (record_opts__config(&rec->opts)) {
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03002177 err = -EINVAL;
Adrian Hunter394c01e2016-09-23 17:38:36 +03002178 goto out;
Mike Galbraith7e4ff9e2009-10-12 07:56:03 +02002179 }
2180
Alexey Budankov93f20c02018-11-06 12:07:19 +03002181 if (rec->opts.nr_cblocks > nr_cblocks_max)
2182 rec->opts.nr_cblocks = nr_cblocks_max;
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002183 if (verbose > 0)
2184 pr_info("nr_cblocks: %d\n", rec->opts.nr_cblocks);
2185
Alexey Budankov9d2ed642019-01-22 20:47:43 +03002186 pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
2187
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002188 err = __cmd_record(&record, argc, argv);
Adrian Hunter394c01e2016-09-23 17:38:36 +03002189out:
Namhyung Kim45604712014-05-12 09:47:24 +09002190 perf_evlist__delete(rec->evlist);
Arnaldo Carvalho de Melod65a4582010-07-30 18:31:28 -03002191 symbol__exit();
Adrian Hunteref149c22015-04-09 18:53:45 +03002192 auxtrace_record__free(rec->itr);
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03002193 return err;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002194}
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002195
2196static void snapshot_sig_handler(int sig __maybe_unused)
2197{
Jiri Olsadc0c6122017-01-09 10:51:58 +01002198 struct record *rec = &record;
2199
Wang Nan5f9cf592016-04-20 18:59:49 +00002200 if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
2201 trigger_hit(&auxtrace_snapshot_trigger);
2202 auxtrace_record__snapshot_started = 1;
2203 if (auxtrace_record__snapshot_start(record.itr))
2204 trigger_error(&auxtrace_snapshot_trigger);
2205 }
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002206
Jiri Olsadc0c6122017-01-09 10:51:58 +01002207 if (switch_output_signal(rec))
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002208 trigger_hit(&switch_output_trigger);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002209}
Jiri Olsabfacbe32017-01-09 10:52:00 +01002210
2211static void alarm_sig_handler(int sig __maybe_unused)
2212{
2213 struct record *rec = &record;
2214
2215 if (switch_output_time(rec))
2216 trigger_hit(&switch_output_trigger);
2217}