blob: 7ced5f3e81006bc93a56821b67738705347129a0 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Ingo Molnarabaff322009-06-02 22:59:57 +02002/*
Ingo Molnarbf9e1872009-06-02 23:37:05 +02003 * builtin-record.c
4 *
5 * Builtin record command: Record the profile of a workload
6 * (or a CPU, or a PID) into the perf.data output file - for
7 * later analysis via perf report.
Ingo Molnarabaff322009-06-02 22:59:57 +02008 */
Ingo Molnar16f762a2009-05-27 09:10:38 +02009#include "builtin.h"
Ingo Molnarbf9e1872009-06-02 23:37:05 +020010
11#include "perf.h"
12
Arnaldo Carvalho de Melo6122e4e2010-02-03 16:52:05 -020013#include "util/build-id.h"
Thomas Gleixner6eda5832009-05-01 18:29:57 +020014#include "util/util.h"
Josh Poimboeuf4b6ab942015-12-15 09:39:39 -060015#include <subcmd/parse-options.h>
Ingo Molnar8ad8db32009-05-26 11:10:09 +020016#include "util/parse-events.h"
Taeung Song41840d22016-06-23 17:55:17 +090017#include "util/config.h"
Thomas Gleixner6eda5832009-05-01 18:29:57 +020018
Arnaldo Carvalho de Melo8f651ea2014-10-09 16:12:24 -030019#include "util/callchain.h"
Arnaldo Carvalho de Melof14d5702014-10-17 12:17:40 -030020#include "util/cgroup.h"
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020021#include "util/header.h"
Frederic Weisbecker66e274f2009-08-12 11:07:25 +020022#include "util/event.h"
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -020023#include "util/evlist.h"
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -020024#include "util/evsel.h"
Frederic Weisbecker8f288272009-08-16 22:05:48 +020025#include "util/debug.h"
Arnaldo Carvalho de Melo94c744b2009-12-11 21:24:02 -020026#include "util/session.h"
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -020027#include "util/tool.h"
Arnaldo Carvalho de Melo8d063672009-11-04 18:50:43 -020028#include "util/symbol.h"
Paul Mackerrasa12b51c2010-03-10 20:36:09 +110029#include "util/cpumap.h"
Arnaldo Carvalho de Melofd782602011-01-18 15:15:24 -020030#include "util/thread_map.h"
Jiri Olsaf5fc14122013-10-15 16:27:32 +020031#include "util/data.h"
Stephane Eranianbcc84ec2015-08-31 18:41:12 +020032#include "util/perf_regs.h"
Adrian Hunteref149c22015-04-09 18:53:45 +030033#include "util/auxtrace.h"
Adrian Hunter46bc29b2016-03-08 10:38:44 +020034#include "util/tsc.h"
Andi Kleenf00898f2015-05-27 10:51:51 -070035#include "util/parse-branch-options.h"
Stephane Eranianbcc84ec2015-08-31 18:41:12 +020036#include "util/parse-regs-options.h"
Wang Nan71dc23262015-10-14 12:41:19 +000037#include "util/llvm-utils.h"
Wang Nan8690a2a2016-02-22 09:10:32 +000038#include "util/bpf-loader.h"
Wang Nan5f9cf592016-04-20 18:59:49 +000039#include "util/trigger.h"
Wang Nana0748652016-11-26 07:03:28 +000040#include "util/perf-hooks.h"
Arnaldo Carvalho de Meloc5e40272017-04-19 16:12:39 -030041#include "util/time-utils.h"
Arnaldo Carvalho de Melo58db1d62017-04-19 16:05:56 -030042#include "util/units.h"
Song Liu7b612e22019-01-17 08:15:19 -080043#include "util/bpf-event.h"
Wang Nand8871ea2016-02-26 09:32:06 +000044#include "asm/bug.h"
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020045
Arnaldo Carvalho de Meloa43783a2017-04-18 10:46:11 -030046#include <errno.h>
Arnaldo Carvalho de Melofd20e812017-04-17 15:23:08 -030047#include <inttypes.h>
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -030048#include <locale.h>
Arnaldo Carvalho de Melo42087352017-04-19 19:06:30 -030049#include <poll.h>
Peter Zijlstra97124d5e2009-06-02 15:52:24 +020050#include <unistd.h>
Peter Zijlstrade9ac072009-04-08 15:01:31 +020051#include <sched.h>
Arnaldo Carvalho de Melo9607ad32017-04-19 15:49:18 -030052#include <signal.h>
Arnaldo Carvalho de Meloa41794c2010-05-18 18:29:23 -030053#include <sys/mman.h>
Arnaldo Carvalho de Melo42087352017-04-19 19:06:30 -030054#include <sys/wait.h>
Arnaldo Carvalho de Melo0693e682016-08-08 15:05:46 -030055#include <linux/time64.h>
Bernhard Rosenkraenzer78da39f2012-10-08 09:43:26 +030056
/*
 * Configuration for rotating ("switching") the perf.data output file:
 * rotation can be requested by signal, by output size, or by elapsed time.
 */
struct switch_output {
	bool		 enabled;	/* any rotation trigger is configured */
	bool		 signal;	/* rotate when the rotation signal arrives */
	unsigned long	 size;		/* rotate after this many bytes written; 0 = off */
	unsigned long	 time;		/* rotate after this many time units; 0 = off */
	const char	*str;		/* raw option argument as given on the command line */
	bool		 set;		/* the command-line option was supplied */
};
65
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -030066struct record {
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -020067 struct perf_tool tool;
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -030068 struct record_opts opts;
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -020069 u64 bytes_written;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +010070 struct perf_data data;
Adrian Hunteref149c22015-04-09 18:53:45 +030071 struct auxtrace_record *itr;
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -020072 struct perf_evlist *evlist;
73 struct perf_session *session;
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -020074 int realtime_prio;
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -020075 bool no_buildid;
Wang Nand2db9a92016-01-25 09:56:19 +000076 bool no_buildid_set;
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -020077 bool no_buildid_cache;
Wang Nand2db9a92016-01-25 09:56:19 +000078 bool no_buildid_cache_set;
Namhyung Kim61566812016-01-11 22:37:09 +090079 bool buildid_all;
Wang Nanecfd7a92016-04-13 08:21:07 +000080 bool timestamp_filename;
Jin Yao68588ba2017-12-08 21:13:42 +080081 bool timestamp_boundary;
Jiri Olsa1b43b702017-01-09 10:51:56 +010082 struct switch_output switch_output;
Yang Shi9f065192015-09-29 14:49:43 -070083 unsigned long long samples;
Alexey Budankov9d2ed642019-01-22 20:47:43 +030084 cpu_set_t affinity_mask;
Arnaldo Carvalho de Melo0f82ebc2011-11-08 14:41:57 -020085};
Ingo Molnara21ca2c2009-06-06 09:58:57 +020086
Jiri Olsadc0c6122017-01-09 10:51:58 +010087static volatile int auxtrace_record__snapshot_started;
88static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
89static DEFINE_TRIGGER(switch_output_trigger);
90
Alexey Budankov9d2ed642019-01-22 20:47:43 +030091static const char *affinity_tags[PERF_AFFINITY_MAX] = {
92 "SYS", "NODE", "CPU"
93};
94
Jiri Olsadc0c6122017-01-09 10:51:58 +010095static bool switch_output_signal(struct record *rec)
96{
97 return rec->switch_output.signal &&
98 trigger_is_ready(&switch_output_trigger);
99}
100
101static bool switch_output_size(struct record *rec)
102{
103 return rec->switch_output.size &&
104 trigger_is_ready(&switch_output_trigger) &&
105 (rec->bytes_written >= rec->switch_output.size);
106}
107
Jiri Olsabfacbe32017-01-09 10:52:00 +0100108static bool switch_output_time(struct record *rec)
109{
110 return rec->switch_output.time &&
111 trigger_is_ready(&switch_output_trigger);
112}
113
Jiri Olsaded2b8f2018-09-13 14:54:06 +0200114static int record__write(struct record *rec, struct perf_mmap *map __maybe_unused,
115 void *bf, size_t size)
Peter Zijlstraf5970552009-06-18 23:22:55 +0200116{
Jiri Olsaded2b8f2018-09-13 14:54:06 +0200117 struct perf_data_file *file = &rec->session->data->file;
118
119 if (perf_data_file__write(file, bf, size) < 0) {
Jiri Olsa50a9b862013-11-22 13:11:24 +0100120 pr_err("failed to write perf data, error: %m\n");
121 return -1;
Peter Zijlstraf5970552009-06-18 23:22:55 +0200122 }
David Ahern8d3eca22012-08-26 12:24:47 -0600123
Arnaldo Carvalho de Melocf8b2e62013-12-19 14:26:26 -0300124 rec->bytes_written += size;
Jiri Olsadc0c6122017-01-09 10:51:58 +0100125
126 if (switch_output_size(rec))
127 trigger_hit(&switch_output_trigger);
128
David Ahern8d3eca22012-08-26 12:24:47 -0600129 return 0;
Peter Zijlstraf5970552009-06-18 23:22:55 +0200130}
131
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300132#ifdef HAVE_AIO_SUPPORT
/*
 * Queue one POSIX AIO write of @size bytes at @buf to offset @off of
 * @trace_fd.  Retries while aio_write() fails with EAGAIN; on any other
 * failure the control block is invalidated (aio_fildes = -1).  Returns
 * aio_write()'s final result: 0 queued, -1 error.
 */
static int record__aio_write(struct aiocb *cblock, int trace_fd,
			     void *buf, size_t size, off_t off)
{
	int rc;

	cblock->aio_fildes = trace_fd;
	cblock->aio_buf = buf;
	cblock->aio_nbytes = size;
	cblock->aio_offset = off;
	cblock->aio_sigevent.sigev_notify = SIGEV_NONE;

	/* EAGAIN means the AIO queue is momentarily full: just retry. */
	while ((rc = aio_write(cblock)) != 0) {
		if (errno == EAGAIN)
			continue;
		cblock->aio_fildes = -1;
		pr_err("failed to queue perf data, error: %m\n");
		break;
	}

	return rc;
}
157
/*
 * Check completion of one in-flight AIO write.  Returns 0 while the
 * request is still in progress (or was restarted for a short write),
 * and 1 once the control block is fully retired.
 */
static int record__aio_complete(struct perf_mmap *md, struct aiocb *cblock)
{
	ssize_t aio_ret, written;
	size_t rem_size;
	int aio_errno;

	aio_errno = aio_error(cblock);
	if (aio_errno == EINPROGRESS)
		return 0;

	written = aio_ret = aio_return(cblock);
	if (aio_ret < 0) {
		if (aio_errno != EINTR)
			pr_err("failed to write perf data, error: %m\n");
		written = 0;	/* treat the whole chunk as still pending */
	}

	rem_size = cblock->aio_nbytes - written;

	if (rem_size == 0) {
		cblock->aio_fildes = -1;
		/*
		 * md->refcount is incremented in perf_mmap__push() for
		 * every enqueued aio write request so decrement it because
		 * the request is now complete.
		 */
		perf_mmap__put(md);
		return 1;
	}

	/*
	 * The kernel wrote only part of the chunk: restart the AIO write
	 * for the remainder at the adjusted offset.
	 */
	record__aio_write(cblock, cblock->aio_fildes,
			  (void *)(cblock->aio_buf + written),
			  rem_size, cblock->aio_offset + written);
	return 0;
}
203
Alexey Budankov93f20c02018-11-06 12:07:19 +0300204static int record__aio_sync(struct perf_mmap *md, bool sync_all)
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300205{
Alexey Budankov93f20c02018-11-06 12:07:19 +0300206 struct aiocb **aiocb = md->aio.aiocb;
207 struct aiocb *cblocks = md->aio.cblocks;
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300208 struct timespec timeout = { 0, 1000 * 1000 * 1 }; /* 1ms */
Alexey Budankov93f20c02018-11-06 12:07:19 +0300209 int i, do_suspend;
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300210
211 do {
Alexey Budankov93f20c02018-11-06 12:07:19 +0300212 do_suspend = 0;
213 for (i = 0; i < md->aio.nr_cblocks; ++i) {
214 if (cblocks[i].aio_fildes == -1 || record__aio_complete(md, &cblocks[i])) {
215 if (sync_all)
216 aiocb[i] = NULL;
217 else
218 return i;
219 } else {
220 /*
221 * Started aio write is not complete yet
222 * so it has to be waited before the
223 * next allocation.
224 */
225 aiocb[i] = &cblocks[i];
226 do_suspend = 1;
227 }
228 }
229 if (!do_suspend)
230 return -1;
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300231
Alexey Budankov93f20c02018-11-06 12:07:19 +0300232 while (aio_suspend((const struct aiocb **)aiocb, md->aio.nr_cblocks, &timeout)) {
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300233 if (!(errno == EAGAIN || errno == EINTR))
234 pr_err("failed to sync perf data, error: %m\n");
235 }
236 } while (1);
237}
238
239static int record__aio_pushfn(void *to, struct aiocb *cblock, void *bf, size_t size, off_t off)
240{
241 struct record *rec = to;
242 int ret, trace_fd = rec->session->data->file.fd;
243
244 rec->samples++;
245
246 ret = record__aio_write(cblock, trace_fd, bf, size, off);
247 if (!ret) {
248 rec->bytes_written += size;
249 if (switch_output_size(rec))
250 trigger_hit(&switch_output_trigger);
251 }
252
253 return ret;
254}
255
/* Current file position of the trace output fd (AIO writes bypass it). */
static off_t record__aio_get_pos(int trace_fd)
{
	return lseek(trace_fd, 0, SEEK_CUR);
}
260
/* Re-sync the trace output fd's position after AIO writes at explicit offsets. */
static void record__aio_set_pos(int trace_fd, off_t pos)
{
	lseek(trace_fd, pos, SEEK_SET);
}
265
266static void record__aio_mmap_read_sync(struct record *rec)
267{
268 int i;
269 struct perf_evlist *evlist = rec->evlist;
270 struct perf_mmap *maps = evlist->mmap;
271
272 if (!rec->opts.nr_cblocks)
273 return;
274
275 for (i = 0; i < evlist->nr_mmaps; i++) {
276 struct perf_mmap *map = &maps[i];
277
278 if (map->base)
Alexey Budankov93f20c02018-11-06 12:07:19 +0300279 record__aio_sync(map, true);
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300280 }
281}
282
283static int nr_cblocks_default = 1;
Alexey Budankov93f20c02018-11-06 12:07:19 +0300284static int nr_cblocks_max = 4;
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300285
286static int record__aio_parse(const struct option *opt,
Alexey Budankov93f20c02018-11-06 12:07:19 +0300287 const char *str,
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300288 int unset)
289{
290 struct record_opts *opts = (struct record_opts *)opt->value;
291
Alexey Budankov93f20c02018-11-06 12:07:19 +0300292 if (unset) {
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300293 opts->nr_cblocks = 0;
Alexey Budankov93f20c02018-11-06 12:07:19 +0300294 } else {
295 if (str)
296 opts->nr_cblocks = strtol(str, NULL, 0);
297 if (!opts->nr_cblocks)
298 opts->nr_cblocks = nr_cblocks_default;
299 }
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300300
301 return 0;
302}
303#else /* HAVE_AIO_SUPPORT */
Alexey Budankov93f20c02018-11-06 12:07:19 +0300304static int nr_cblocks_max = 0;
305
306static int record__aio_sync(struct perf_mmap *md __maybe_unused, bool sync_all __maybe_unused)
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300307{
Alexey Budankov93f20c02018-11-06 12:07:19 +0300308 return -1;
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300309}
310
311static int record__aio_pushfn(void *to __maybe_unused, struct aiocb *cblock __maybe_unused,
312 void *bf __maybe_unused, size_t size __maybe_unused, off_t off __maybe_unused)
313{
314 return -1;
315}
316
317static off_t record__aio_get_pos(int trace_fd __maybe_unused)
318{
319 return -1;
320}
321
322static void record__aio_set_pos(int trace_fd __maybe_unused, off_t pos __maybe_unused)
323{
324}
325
326static void record__aio_mmap_read_sync(struct record *rec __maybe_unused)
327{
328}
329#endif
330
331static int record__aio_enabled(struct record *rec)
332{
333 return rec->opts.nr_cblocks > 0;
334}
335
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -0200336static int process_synthesized_event(struct perf_tool *tool,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -0200337 union perf_event *event,
Irina Tirdea1d037ca2012-09-11 01:15:03 +0300338 struct perf_sample *sample __maybe_unused,
339 struct machine *machine __maybe_unused)
Arnaldo Carvalho de Melo234fbbf2009-10-26 19:23:18 -0200340{
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -0300341 struct record *rec = container_of(tool, struct record, tool);
Jiri Olsaded2b8f2018-09-13 14:54:06 +0200342 return record__write(rec, NULL, event, event->header.size);
Arnaldo Carvalho de Melo234fbbf2009-10-26 19:23:18 -0200343}
344
Jiri Olsaded2b8f2018-09-13 14:54:06 +0200345static int record__pushfn(struct perf_mmap *map, void *to, void *bf, size_t size)
Arnaldo Carvalho de Melod37f1582017-10-05 16:39:55 -0300346{
347 struct record *rec = to;
348
349 rec->samples++;
Jiri Olsaded2b8f2018-09-13 14:54:06 +0200350 return record__write(rec, map, bf, size);
Arnaldo Carvalho de Melod37f1582017-10-05 16:39:55 -0300351}
352
/* Main-loop exit flag, terminating signal number, and child-exit marker. */
static volatile int done;
static volatile int signr = -1;
static volatile int child_finished;

/*
 * Async-signal-safe handler: record which condition ended the session
 * (child exit vs. a terminating signal) and ask the main loop to stop.
 */
static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
}
366
/* On SIGSEGV: run registered recovery hooks, then dump a stack trace. */
static void sigsegv_handler(int sig)
{
	perf_hooks__recover();
	sighandler_dump_stack(sig);
}
372
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300373static void record__sig_exit(void)
374{
375 if (signr == -1)
376 return;
377
378 signal(signr, SIG_DFL);
379 raise(signr);
380}
381
Adrian Huntere31f0d02015-04-30 17:37:27 +0300382#ifdef HAVE_AUXTRACE_SUPPORT
383
Adrian Hunteref149c22015-04-09 18:53:45 +0300384static int record__process_auxtrace(struct perf_tool *tool,
Jiri Olsaded2b8f2018-09-13 14:54:06 +0200385 struct perf_mmap *map,
Adrian Hunteref149c22015-04-09 18:53:45 +0300386 union perf_event *event, void *data1,
387 size_t len1, void *data2, size_t len2)
388{
389 struct record *rec = container_of(tool, struct record, tool);
Jiri Olsa8ceb41d2017-01-23 22:07:59 +0100390 struct perf_data *data = &rec->data;
Adrian Hunteref149c22015-04-09 18:53:45 +0300391 size_t padding;
392 u8 pad[8] = {0};
393
Jiri Olsa8ceb41d2017-01-23 22:07:59 +0100394 if (!perf_data__is_pipe(data)) {
Adrian Hunter99fa2982015-04-30 17:37:25 +0300395 off_t file_offset;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +0100396 int fd = perf_data__fd(data);
Adrian Hunter99fa2982015-04-30 17:37:25 +0300397 int err;
398
399 file_offset = lseek(fd, 0, SEEK_CUR);
400 if (file_offset == -1)
401 return -1;
402 err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
403 event, file_offset);
404 if (err)
405 return err;
406 }
407
Adrian Hunteref149c22015-04-09 18:53:45 +0300408 /* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
409 padding = (len1 + len2) & 7;
410 if (padding)
411 padding = 8 - padding;
412
Jiri Olsaded2b8f2018-09-13 14:54:06 +0200413 record__write(rec, map, event, event->header.size);
414 record__write(rec, map, data1, len1);
Adrian Hunteref149c22015-04-09 18:53:45 +0300415 if (len2)
Jiri Olsaded2b8f2018-09-13 14:54:06 +0200416 record__write(rec, map, data2, len2);
417 record__write(rec, map, &pad, padding);
Adrian Hunteref149c22015-04-09 18:53:45 +0300418
419 return 0;
420}
421
422static int record__auxtrace_mmap_read(struct record *rec,
Jiri Olsae035f4c2018-09-13 14:54:05 +0200423 struct perf_mmap *map)
Adrian Hunteref149c22015-04-09 18:53:45 +0300424{
425 int ret;
426
Jiri Olsae035f4c2018-09-13 14:54:05 +0200427 ret = auxtrace_mmap__read(map, rec->itr, &rec->tool,
Adrian Hunteref149c22015-04-09 18:53:45 +0300428 record__process_auxtrace);
429 if (ret < 0)
430 return ret;
431
432 if (ret)
433 rec->samples++;
434
435 return 0;
436}
437
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300438static int record__auxtrace_mmap_read_snapshot(struct record *rec,
Jiri Olsae035f4c2018-09-13 14:54:05 +0200439 struct perf_mmap *map)
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300440{
441 int ret;
442
Jiri Olsae035f4c2018-09-13 14:54:05 +0200443 ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool,
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300444 record__process_auxtrace,
445 rec->opts.auxtrace_snapshot_size);
446 if (ret < 0)
447 return ret;
448
449 if (ret)
450 rec->samples++;
451
452 return 0;
453}
454
455static int record__auxtrace_read_snapshot_all(struct record *rec)
456{
457 int i;
458 int rc = 0;
459
460 for (i = 0; i < rec->evlist->nr_mmaps; i++) {
Jiri Olsae035f4c2018-09-13 14:54:05 +0200461 struct perf_mmap *map = &rec->evlist->mmap[i];
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300462
Jiri Olsae035f4c2018-09-13 14:54:05 +0200463 if (!map->auxtrace_mmap.base)
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300464 continue;
465
Jiri Olsae035f4c2018-09-13 14:54:05 +0200466 if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300467 rc = -1;
468 goto out;
469 }
470 }
471out:
472 return rc;
473}
474
475static void record__read_auxtrace_snapshot(struct record *rec)
476{
477 pr_debug("Recording AUX area tracing snapshot\n");
478 if (record__auxtrace_read_snapshot_all(rec) < 0) {
Wang Nan5f9cf592016-04-20 18:59:49 +0000479 trigger_error(&auxtrace_snapshot_trigger);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300480 } else {
Wang Nan5f9cf592016-04-20 18:59:49 +0000481 if (auxtrace_record__snapshot_finish(rec->itr))
482 trigger_error(&auxtrace_snapshot_trigger);
483 else
484 trigger_ready(&auxtrace_snapshot_trigger);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300485 }
486}
487
Adrian Hunter4b5ea3b2018-03-06 11:13:12 +0200488static int record__auxtrace_init(struct record *rec)
489{
490 int err;
491
492 if (!rec->itr) {
493 rec->itr = auxtrace_record__init(rec->evlist, &err);
494 if (err)
495 return err;
496 }
497
498 err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
499 rec->opts.auxtrace_snapshot_opts);
500 if (err)
501 return err;
502
503 return auxtrace_parse_filters(rec->evlist);
504}
505
Adrian Huntere31f0d02015-04-30 17:37:27 +0300506#else
507
508static inline
509int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
Jiri Olsae035f4c2018-09-13 14:54:05 +0200510 struct perf_mmap *map __maybe_unused)
Adrian Huntere31f0d02015-04-30 17:37:27 +0300511{
512 return 0;
513}
514
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300515static inline
516void record__read_auxtrace_snapshot(struct record *rec __maybe_unused)
517{
518}
519
520static inline
521int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
522{
523 return 0;
524}
525
Adrian Hunter4b5ea3b2018-03-06 11:13:12 +0200526static int record__auxtrace_init(struct record *rec __maybe_unused)
527{
528 return 0;
529}
530
Adrian Huntere31f0d02015-04-30 17:37:27 +0300531#endif
532
Wang Nancda57a82016-06-27 10:24:03 +0000533static int record__mmap_evlist(struct record *rec,
534 struct perf_evlist *evlist)
535{
536 struct record_opts *opts = &rec->opts;
537 char msg[512];
538
Wang Nan7a276ff2017-12-03 02:00:38 +0000539 if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
Wang Nancda57a82016-06-27 10:24:03 +0000540 opts->auxtrace_mmap_pages,
Alexey Budankov9d2ed642019-01-22 20:47:43 +0300541 opts->auxtrace_snapshot_mode,
542 opts->nr_cblocks, opts->affinity) < 0) {
Wang Nancda57a82016-06-27 10:24:03 +0000543 if (errno == EPERM) {
544 pr_err("Permission error mapping pages.\n"
545 "Consider increasing "
546 "/proc/sys/kernel/perf_event_mlock_kb,\n"
547 "or try again with a smaller value of -m/--mmap_pages.\n"
548 "(current value: %u,%u)\n",
549 opts->mmap_pages, opts->auxtrace_mmap_pages);
550 return -errno;
551 } else {
552 pr_err("failed to mmap with %d (%s)\n", errno,
Arnaldo Carvalho de Meloc8b5f2c2016-07-06 11:56:20 -0300553 str_error_r(errno, msg, sizeof(msg)));
Wang Nancda57a82016-06-27 10:24:03 +0000554 if (errno)
555 return -errno;
556 else
557 return -EINVAL;
558 }
559 }
560 return 0;
561}
562
563static int record__mmap(struct record *rec)
564{
565 return record__mmap_evlist(rec, rec->evlist);
566}
567
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -0300568static int record__open(struct record *rec)
Arnaldo Carvalho de Melodd7927f2011-01-12 14:28:51 -0200569{
Arnaldo Carvalho de Melod6195a62017-02-13 16:45:24 -0300570 char msg[BUFSIZ];
Jiri Olsa6a4bb042012-08-08 12:22:36 +0200571 struct perf_evsel *pos;
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -0200572 struct perf_evlist *evlist = rec->evlist;
573 struct perf_session *session = rec->session;
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -0300574 struct record_opts *opts = &rec->opts;
David Ahern8d3eca22012-08-26 12:24:47 -0600575 int rc = 0;
Arnaldo Carvalho de Melodd7927f2011-01-12 14:28:51 -0200576
Arnaldo Carvalho de Melod3dbf432017-11-03 15:34:34 -0300577 /*
578 * For initial_delay we need to add a dummy event so that we can track
579 * PERF_RECORD_MMAP while we wait for the initial delay to enable the
580 * real events, the ones asked by the user.
581 */
582 if (opts->initial_delay) {
583 if (perf_evlist__add_dummy(evlist))
584 return -ENOMEM;
585
586 pos = perf_evlist__first(evlist);
587 pos->tracking = 0;
588 pos = perf_evlist__last(evlist);
589 pos->tracking = 1;
590 pos->attr.enable_on_exec = 1;
591 }
592
Arnaldo Carvalho de Meloe68ae9c2016-04-11 18:15:29 -0300593 perf_evlist__config(evlist, opts, &callchain_param);
Jiri Olsacac21422012-11-12 18:34:00 +0100594
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -0300595 evlist__for_each_entry(evlist, pos) {
Ingo Molnar3da297a2009-06-07 17:39:02 +0200596try_again:
Kan Liangd988d5e2015-08-21 02:23:14 -0400597 if (perf_evsel__open(pos, pos->cpus, pos->threads) < 0) {
Arnaldo Carvalho de Melo56e52e82012-12-13 15:10:58 -0300598 if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
Namhyung Kimbb963e12017-02-17 17:17:38 +0900599 if (verbose > 0)
Arnaldo Carvalho de Meloc0a54342012-12-13 14:16:30 -0300600 ui__warning("%s\n", msg);
Zhang, Yanmind6d901c2010-03-18 11:36:05 -0300601 goto try_again;
602 }
Andi Kleencf99ad12018-10-01 12:59:27 -0700603 if ((errno == EINVAL || errno == EBADF) &&
604 pos->leader != pos &&
605 pos->weak_group) {
606 pos = perf_evlist__reset_weak_group(evlist, pos);
607 goto try_again;
608 }
Arnaldo Carvalho de Melo56e52e82012-12-13 15:10:58 -0300609 rc = -errno;
610 perf_evsel__open_strerror(pos, &opts->target,
611 errno, msg, sizeof(msg));
612 ui__error("%s\n", msg);
David Ahern8d3eca22012-08-26 12:24:47 -0600613 goto out;
Zhang, Yanmind6d901c2010-03-18 11:36:05 -0300614 }
Andi Kleenbfd8f722017-11-17 13:42:58 -0800615
616 pos->supported = true;
Li Zefanc171b552009-10-15 11:22:07 +0800617 }
Arnaldo Carvalho de Meloa43d3f02010-12-25 12:12:25 -0200618
Arnaldo Carvalho de Melo23d4aad2015-03-24 19:23:47 -0300619 if (perf_evlist__apply_filters(evlist, &pos)) {
Arnaldo Carvalho de Melo62d94b02017-06-27 11:22:31 -0300620 pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
Arnaldo Carvalho de Melo23d4aad2015-03-24 19:23:47 -0300621 pos->filter, perf_evsel__name(pos), errno,
Arnaldo Carvalho de Meloc8b5f2c2016-07-06 11:56:20 -0300622 str_error_r(errno, msg, sizeof(msg)));
David Ahern8d3eca22012-08-26 12:24:47 -0600623 rc = -1;
624 goto out;
Frederic Weisbecker0a102472011-02-26 04:51:54 +0100625 }
626
Wang Nancda57a82016-06-27 10:24:03 +0000627 rc = record__mmap(rec);
628 if (rc)
David Ahern8d3eca22012-08-26 12:24:47 -0600629 goto out;
Arnaldo Carvalho de Melo0a27d7f2011-01-14 15:50:51 -0200630
Jiri Olsa563aecb2013-06-05 13:35:06 +0200631 session->evlist = evlist;
Arnaldo Carvalho de Melo7b56cce2012-08-01 19:31:00 -0300632 perf_session__set_id_hdr_size(session);
David Ahern8d3eca22012-08-26 12:24:47 -0600633out:
634 return rc;
Peter Zijlstra16c8a102009-05-05 17:50:27 +0200635}
636
Namhyung Kime3d59112015-01-29 17:06:44 +0900637static int process_sample_event(struct perf_tool *tool,
638 union perf_event *event,
639 struct perf_sample *sample,
640 struct perf_evsel *evsel,
641 struct machine *machine)
642{
643 struct record *rec = container_of(tool, struct record, tool);
644
Jin Yao68588ba2017-12-08 21:13:42 +0800645 if (rec->evlist->first_sample_time == 0)
646 rec->evlist->first_sample_time = sample->time;
Namhyung Kime3d59112015-01-29 17:06:44 +0900647
Jin Yao68588ba2017-12-08 21:13:42 +0800648 rec->evlist->last_sample_time = sample->time;
649
650 if (rec->buildid_all)
651 return 0;
652
653 rec->samples++;
Namhyung Kime3d59112015-01-29 17:06:44 +0900654 return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
655}
656
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -0300657static int process_buildids(struct record *rec)
Arnaldo Carvalho de Melo6122e4e2010-02-03 16:52:05 -0200658{
Jiri Olsa8ceb41d2017-01-23 22:07:59 +0100659 struct perf_data *data = &rec->data;
Jiri Olsaf5fc14122013-10-15 16:27:32 +0200660 struct perf_session *session = rec->session;
Arnaldo Carvalho de Melo6122e4e2010-02-03 16:52:05 -0200661
Jiri Olsa8ceb41d2017-01-23 22:07:59 +0100662 if (data->size == 0)
Arnaldo Carvalho de Melo9f591fd2010-03-11 15:53:11 -0300663 return 0;
664
Namhyung Kim00dc8652014-11-04 10:14:32 +0900665 /*
666 * During this process, it'll load kernel map and replace the
667 * dso->long_name to a real pathname it found. In this case
668 * we prefer the vmlinux path like
669 * /lib/modules/3.16.4/build/vmlinux
670 *
671 * rather than build-id path (in debug directory).
672 * $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
673 */
674 symbol_conf.ignore_vmlinux_buildid = true;
675
Namhyung Kim61566812016-01-11 22:37:09 +0900676 /*
677 * If --buildid-all is given, it marks all DSO regardless of hits,
Jin Yao68588ba2017-12-08 21:13:42 +0800678 * so no need to process samples. But if timestamp_boundary is enabled,
679 * it still needs to walk on all samples to get the timestamps of
680 * first/last samples.
Namhyung Kim61566812016-01-11 22:37:09 +0900681 */
Jin Yao68588ba2017-12-08 21:13:42 +0800682 if (rec->buildid_all && !rec->timestamp_boundary)
Namhyung Kim61566812016-01-11 22:37:09 +0900683 rec->tool.sample = NULL;
684
Arnaldo Carvalho de Melob7b61cb2015-03-03 11:58:45 -0300685 return perf_session__process_events(session);
Arnaldo Carvalho de Melo6122e4e2010-02-03 16:52:05 -0200686}
687
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -0200688static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800689{
690 int err;
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -0200691 struct perf_tool *tool = data;
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800692 /*
693 *As for guest kernel when processing subcommand record&report,
694 *we arrange module mmap prior to guest kernel mmap and trigger
695 *a preload dso because default guest module symbols are loaded
696 *from guest kallsyms instead of /lib/modules/XXX/XXX. This
697 *method is used to avoid symbol missing when the first addr is
698 *in module instead of in guest kernel.
699 */
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -0200700 err = perf_event__synthesize_modules(tool, process_synthesized_event,
Arnaldo Carvalho de Melo743eb862011-11-28 07:56:39 -0200701 machine);
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800702 if (err < 0)
703 pr_err("Couldn't record guest kernel [%d]'s reference"
Arnaldo Carvalho de Melo23346f22010-04-27 21:17:50 -0300704 " relocation symbol.\n", machine->pid);
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800705
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800706 /*
707 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
708 * have no _text sometimes.
709 */
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -0200710 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
Adrian Hunter0ae617b2014-01-29 16:14:40 +0200711 machine);
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800712 if (err < 0)
713 pr_err("Couldn't record guest kernel [%d]'s reference"
Arnaldo Carvalho de Melo23346f22010-04-27 21:17:50 -0300714 " relocation symbol.\n", machine->pid);
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800715}
716
Frederic Weisbecker98402802010-05-02 22:05:29 +0200717static struct perf_event_header finished_round_event = {
718 .size = sizeof(struct perf_event_header),
719 .type = PERF_RECORD_FINISHED_ROUND,
720};
721
/*
 * Drain one set of mmap ring buffers (either the regular or the
 * overwritable/backward set, selected by @overwrite) into the output
 * file, via synchronous pushes or AIO depending on configuration.
 *
 * Returns 0 on success (including the "nothing to do" cases) or -1 on
 * any push/read failure.
 */
static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
				    bool overwrite)
{
	u64 bytes_written = rec->bytes_written;
	int i;
	int rc = 0;
	struct perf_mmap *maps;
	int trace_fd = rec->data.file.fd;
	off_t off;	/* only initialized/used when AIO is enabled */

	if (!evlist)
		return 0;

	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
	if (!maps)
		return 0;

	/*
	 * The overwritable buffers are only read when they have been
	 * flipped to DATA_PENDING (e.g. on switch-output or shutdown).
	 */
	if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
		return 0;

	/* With AIO, track the file offset manually across async writes. */
	if (record__aio_enabled(rec))
		off = record__aio_get_pos(trace_fd);

	for (i = 0; i < evlist->nr_mmaps; i++) {
		struct perf_mmap *map = &maps[i];

		if (map->base) {
			if (!record__aio_enabled(rec)) {
				if (perf_mmap__push(map, rec, record__pushfn) != 0) {
					rc = -1;
					goto out;
				}
			} else {
				int idx;
				/*
				 * Call record__aio_sync() to wait till map->data buffer
				 * becomes available after previous aio write request.
				 */
				idx = record__aio_sync(map, false);
				if (perf_mmap__aio_push(map, rec, idx, record__aio_pushfn, &off) != 0) {
					/* Persist the advanced offset even on failure. */
					record__aio_set_pos(trace_fd, off);
					rc = -1;
					goto out;
				}
			}
		}

		/* AUX area data is read separately unless in snapshot mode. */
		if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
		    record__auxtrace_mmap_read(rec, map) != 0) {
			rc = -1;
			goto out;
		}
	}

	if (record__aio_enabled(rec))
		record__aio_set_pos(trace_fd, off);

	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, NULL, &finished_round_event, sizeof(finished_round_event));

	if (overwrite)
		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
out:
	return rc;
}
791
Wang Nancb216862016-06-27 10:24:04 +0000792static int record__mmap_read_all(struct record *rec)
793{
794 int err;
795
Wang Nana4ea0ec2016-07-14 08:34:36 +0000796 err = record__mmap_read_evlist(rec, rec->evlist, false);
Wang Nancb216862016-06-27 10:24:04 +0000797 if (err)
798 return err;
799
Wang Nan057374642016-07-14 08:34:43 +0000800 return record__mmap_read_evlist(rec, rec->evlist, true);
Wang Nancb216862016-06-27 10:24:04 +0000801}
802
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -0300803static void record__init_features(struct record *rec)
David Ahern57706ab2013-11-06 11:41:34 -0700804{
David Ahern57706ab2013-11-06 11:41:34 -0700805 struct perf_session *session = rec->session;
806 int feat;
807
808 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
809 perf_header__set_feat(&session->header, feat);
810
811 if (rec->no_buildid)
812 perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
813
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -0300814 if (!have_tracepoints(&rec->evlist->entries))
David Ahern57706ab2013-11-06 11:41:34 -0700815 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
816
817 if (!rec->opts.branch_stack)
818 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
Adrian Hunteref149c22015-04-09 18:53:45 +0300819
820 if (!rec->opts.full_auxtrace)
821 perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
Jiri Olsaffa517a2015-10-25 15:51:43 +0100822
Alexey Budankovcf790512018-10-09 17:36:24 +0300823 if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
824 perf_header__clear_feat(&session->header, HEADER_CLOCKID);
825
Jiri Olsaffa517a2015-10-25 15:51:43 +0100826 perf_header__clear_feat(&session->header, HEADER_STAT);
David Ahern57706ab2013-11-06 11:41:34 -0700827}
828
Wang Nane1ab48b2016-02-26 09:32:10 +0000829static void
830record__finish_output(struct record *rec)
831{
Jiri Olsa8ceb41d2017-01-23 22:07:59 +0100832 struct perf_data *data = &rec->data;
833 int fd = perf_data__fd(data);
Wang Nane1ab48b2016-02-26 09:32:10 +0000834
Jiri Olsa8ceb41d2017-01-23 22:07:59 +0100835 if (data->is_pipe)
Wang Nane1ab48b2016-02-26 09:32:10 +0000836 return;
837
838 rec->session->header.data_size += rec->bytes_written;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +0100839 data->size = lseek(perf_data__fd(data), 0, SEEK_CUR);
Wang Nane1ab48b2016-02-26 09:32:10 +0000840
841 if (!rec->no_buildid) {
842 process_buildids(rec);
843
844 if (rec->buildid_all)
845 dsos__hit_all(rec->session);
846 }
847 perf_session__write_header(rec->session, rec->evlist, fd, true);
848
849 return;
850}
851
Wang Nan4ea648a2016-07-14 08:34:47 +0000852static int record__synthesize_workload(struct record *rec, bool tail)
Wang Nanbe7b0c92016-04-20 18:59:54 +0000853{
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -0300854 int err;
855 struct thread_map *thread_map;
Wang Nanbe7b0c92016-04-20 18:59:54 +0000856
Wang Nan4ea648a2016-07-14 08:34:47 +0000857 if (rec->opts.tail_synthesize != tail)
858 return 0;
859
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -0300860 thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
861 if (thread_map == NULL)
862 return -1;
863
864 err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
Wang Nanbe7b0c92016-04-20 18:59:54 +0000865 process_synthesized_event,
866 &rec->session->machines.host,
Mark Drayton3fcb10e2018-12-04 12:34:20 -0800867 rec->opts.sample_address);
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -0300868 thread_map__put(thread_map);
869 return err;
Wang Nanbe7b0c92016-04-20 18:59:54 +0000870}
871
Wang Nan4ea648a2016-07-14 08:34:47 +0000872static int record__synthesize(struct record *rec, bool tail);
Wang Nan3c1cb7e2016-04-20 18:59:50 +0000873
/*
 * Rotate the output file (--switch-output): flush pending data,
 * finalize the current perf.data, then open a timestamped successor.
 *
 * Returns the new output fd (>= 0) on success, a negative errno on
 * failure. When @at_exit is true this is the final rotation and no new
 * tracking events are emitted into the successor.
 */
static int
record__switch_output(struct record *rec, bool at_exit)
{
	struct perf_data *data = &rec->data;
	int fd, err;

	/* Same Size: "2015122520103046"*/
	char timestamp[] = "InvalidTimestamp";

	/* Wait for in-flight AIO writes before finalizing the file. */
	record__aio_mmap_read_sync(rec);

	/* Emit tail-phase events into the file being closed. */
	record__synthesize(rec, true);
	if (target__none(&rec->opts.target))
		record__synthesize_workload(rec, true);

	rec->samples = 0;
	record__finish_output(rec);
	err = fetch_current_timestamp(timestamp, sizeof(timestamp));
	if (err) {
		pr_err("Failed to get current timestamp\n");
		return -EINVAL;
	}

	fd = perf_data__switch(data, timestamp,
				    rec->session->header.data_offset,
				    at_exit);
	/* Fresh file: restart the byte/size accounting. */
	if (fd >= 0 && !at_exit) {
		rec->bytes_written = 0;
		rec->session->header.data_size = 0;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
			data->file.path, timestamp);

	/* Output tracking events */
	if (!at_exit) {
		record__synthesize(rec, false);

		/*
		 * In 'perf record --switch-output' without -a,
		 * record__synthesize() in record__switch_output() won't
		 * generate tracking events because there's no thread_map
		 * in evlist. Which causes newly created perf.data doesn't
		 * contain map and comm information.
		 * Create a fake thread_map and directly call
		 * perf_event__synthesize_thread_map() for those events.
		 */
		if (target__none(&rec->opts.target))
			record__synthesize_workload(rec, false);
	}
	return fd;
}
927
/* errno from a failed workload exec, delivered via SIGUSR1's sigval. */
static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	/* Record the errno first, then unblock the main loop. */
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}
943
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300944static void snapshot_sig_handler(int sig);
Jiri Olsabfacbe32017-01-09 10:52:00 +0100945static void alarm_sig_handler(int sig);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300946
/*
 * Weak no-op default: architectures that can convert perf timestamps
 * (e.g. via TSC parameters from the mmap page) override this to emit a
 * time-conversion event; everywhere else synthesizing nothing is fine.
 */
int __weak
perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
			    struct perf_tool *tool __maybe_unused,
			    perf_event__handler_t process __maybe_unused,
			    struct machine *machine __maybe_unused)
{
	return 0;
}
955
Wang Nanee667f92016-06-27 10:24:05 +0000956static const struct perf_event_mmap_page *
957perf_evlist__pick_pc(struct perf_evlist *evlist)
958{
Wang Nanb2cb6152016-07-14 08:34:39 +0000959 if (evlist) {
960 if (evlist->mmap && evlist->mmap[0].base)
961 return evlist->mmap[0].base;
Wang Nan0b72d692017-12-04 16:51:07 +0000962 if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].base)
963 return evlist->overwrite_mmap[0].base;
Wang Nanb2cb6152016-07-14 08:34:39 +0000964 }
Wang Nanee667f92016-06-27 10:24:05 +0000965 return NULL;
966}
967
Wang Nanc45628b2016-05-24 02:28:59 +0000968static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
969{
Wang Nanee667f92016-06-27 10:24:05 +0000970 const struct perf_event_mmap_page *pc;
971
972 pc = perf_evlist__pick_pc(rec->evlist);
973 if (pc)
974 return pc;
Wang Nanc45628b2016-05-24 02:28:59 +0000975 return NULL;
976}
977
Wang Nan4ea648a2016-07-14 08:34:47 +0000978static int record__synthesize(struct record *rec, bool tail)
Wang Nanc45c86e2016-02-26 09:32:07 +0000979{
980 struct perf_session *session = rec->session;
981 struct machine *machine = &session->machines.host;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +0100982 struct perf_data *data = &rec->data;
Wang Nanc45c86e2016-02-26 09:32:07 +0000983 struct record_opts *opts = &rec->opts;
984 struct perf_tool *tool = &rec->tool;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +0100985 int fd = perf_data__fd(data);
Wang Nanc45c86e2016-02-26 09:32:07 +0000986 int err = 0;
987
Wang Nan4ea648a2016-07-14 08:34:47 +0000988 if (rec->opts.tail_synthesize != tail)
989 return 0;
990
Jiri Olsa8ceb41d2017-01-23 22:07:59 +0100991 if (data->is_pipe) {
Jiri Olsaa2015512018-03-14 10:22:04 +0100992 /*
993 * We need to synthesize events first, because some
994 * features works on top of them (on report side).
995 */
Jiri Olsa318ec182018-08-30 08:32:15 +0200996 err = perf_event__synthesize_attrs(tool, rec->evlist,
Wang Nanc45c86e2016-02-26 09:32:07 +0000997 process_synthesized_event);
998 if (err < 0) {
999 pr_err("Couldn't synthesize attrs.\n");
1000 goto out;
1001 }
1002
Jiri Olsaa2015512018-03-14 10:22:04 +01001003 err = perf_event__synthesize_features(tool, session, rec->evlist,
1004 process_synthesized_event);
1005 if (err < 0) {
1006 pr_err("Couldn't synthesize features.\n");
1007 return err;
1008 }
1009
Wang Nanc45c86e2016-02-26 09:32:07 +00001010 if (have_tracepoints(&rec->evlist->entries)) {
1011 /*
1012 * FIXME err <= 0 here actually means that
1013 * there were no tracepoints so its not really
1014 * an error, just that we don't need to
1015 * synthesize anything. We really have to
1016 * return this more properly and also
1017 * propagate errors that now are calling die()
1018 */
1019 err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
1020 process_synthesized_event);
1021 if (err <= 0) {
1022 pr_err("Couldn't record tracing data.\n");
1023 goto out;
1024 }
1025 rec->bytes_written += err;
1026 }
1027 }
1028
Wang Nanc45628b2016-05-24 02:28:59 +00001029 err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
Adrian Hunter46bc29b2016-03-08 10:38:44 +02001030 process_synthesized_event, machine);
1031 if (err)
1032 goto out;
1033
Wang Nanc45c86e2016-02-26 09:32:07 +00001034 if (rec->opts.full_auxtrace) {
1035 err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
1036 session, process_synthesized_event);
1037 if (err)
1038 goto out;
1039 }
1040
Arnaldo Carvalho de Melo6c443952017-11-14 11:03:19 -03001041 if (!perf_evlist__exclude_kernel(rec->evlist)) {
1042 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
1043 machine);
1044 WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
1045 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
1046 "Check /proc/kallsyms permission or run as root.\n");
Wang Nanc45c86e2016-02-26 09:32:07 +00001047
Arnaldo Carvalho de Melo6c443952017-11-14 11:03:19 -03001048 err = perf_event__synthesize_modules(tool, process_synthesized_event,
1049 machine);
1050 WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
1051 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
1052 "Check /proc/modules permission or run as root.\n");
1053 }
Wang Nanc45c86e2016-02-26 09:32:07 +00001054
1055 if (perf_guest) {
1056 machines__process_guests(&session->machines,
1057 perf_event__synthesize_guest_os, tool);
1058 }
1059
Andi Kleenbfd8f722017-11-17 13:42:58 -08001060 err = perf_event__synthesize_extra_attr(&rec->tool,
1061 rec->evlist,
1062 process_synthesized_event,
1063 data->is_pipe);
1064 if (err)
1065 goto out;
1066
Andi Kleen373565d2017-11-17 13:42:59 -08001067 err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->threads,
1068 process_synthesized_event,
1069 NULL);
1070 if (err < 0) {
1071 pr_err("Couldn't synthesize thread map.\n");
1072 return err;
1073 }
1074
1075 err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->cpus,
1076 process_synthesized_event, NULL);
1077 if (err < 0) {
1078 pr_err("Couldn't synthesize cpu map.\n");
1079 return err;
1080 }
1081
Song Liu7b612e22019-01-17 08:15:19 -08001082 err = perf_event__synthesize_bpf_events(tool, process_synthesized_event,
1083 machine, opts);
1084 if (err < 0)
1085 pr_warning("Couldn't synthesize bpf events.\n");
1086
Wang Nanc45c86e2016-02-26 09:32:07 +00001087 err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
1088 process_synthesized_event, opts->sample_address,
Mark Drayton3fcb10e2018-12-04 12:34:20 -08001089 1);
Wang Nanc45c86e2016-02-26 09:32:07 +00001090out:
1091 return err;
1092}
1093
/*
 * Main driver for 'perf record': set up signals/session/output, open
 * the events, optionally fork and start the workload, then loop
 * draining the mmap buffers until done, finally finishing or rotating
 * the output file.
 *
 * Returns 0 on success; otherwise a negative error or the workload's
 * exit status.
 */
static int __cmd_record(struct record *rec, int argc, const char **argv)
{
	int err;
	int status = 0;
	unsigned long waking = 0;
	const bool forks = argc > 0;	/* leftover argv is the workload command */
	struct perf_tool *tool = &rec->tool;
	struct record_opts *opts = &rec->opts;
	struct perf_data *data = &rec->data;
	struct perf_session *session;
	bool disabled = false, draining = false;
	int fd;

	atexit(record__sig_exit);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGTERM, sig_handler);
	signal(SIGSEGV, sigsegv_handler);

	if (rec->opts.record_namespaces)
		tool->namespace_events = true;

	/* SIGUSR2 drives AUX snapshots and/or output switching; else ignore it. */
	if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
		signal(SIGUSR2, snapshot_sig_handler);
		if (rec->opts.auxtrace_snapshot_mode)
			trigger_on(&auxtrace_snapshot_trigger);
		if (rec->switch_output.enabled)
			trigger_on(&switch_output_trigger);
	} else {
		signal(SIGUSR2, SIG_IGN);
	}

	session = perf_session__new(data, false, tool);
	if (session == NULL) {
		pr_err("Perf session creation failed.\n");
		return -1;
	}

	fd = perf_data__fd(data);
	rec->session = session;

	record__init_features(rec);

	if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
		session->header.env.clockid_res_ns = rec->opts.clockid_res_ns;

	/* Fork the workload now (stopped) so its pid can be targeted. */
	if (forks) {
		err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
						    argv, data->is_pipe,
						    workload_exec_failed_signal);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			status = err;
			goto out_delete_session;
		}
	}

	/*
	 * If we have just single event and are sending data
	 * through pipe, we need to force the ids allocation,
	 * because we synthesize event name through the pipe
	 * and need the id for that.
	 */
	if (data->is_pipe && rec->evlist->nr_entries == 1)
		rec->opts.sample_id = true;

	if (record__open(rec) != 0) {
		err = -1;
		goto out_child;
	}

	err = bpf__apply_obj_config();
	if (err) {
		char errbuf[BUFSIZ];

		bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
		pr_err("ERROR: Apply config to BPF failed: %s\n",
			 errbuf);
		goto out_child;
	}

	/*
	 * Normally perf_session__new would do this, but it doesn't have the
	 * evlist.
	 */
	if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
		pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
		rec->tool.ordered_events = false;
	}

	if (!rec->evlist->nr_groups)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

	/* Emit the header now; the on-disk variant is rewritten at the end. */
	if (data->is_pipe) {
		err = perf_header__write_pipe(fd);
		if (err < 0)
			goto out_child;
	} else {
		err = perf_session__write_header(session, rec->evlist, fd, false);
		if (err < 0)
			goto out_child;
	}

	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		err = -1;
		goto out_child;
	}

	err = record__synthesize(rec, false);
	if (err < 0)
		goto out_child;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_child;
		}
	}

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!target__none(&opts->target) && !opts->initial_delay)
		perf_evlist__enable(rec->evlist);

	/*
	 * Let the child rip
	 */
	if (forks) {
		struct machine *machine = &session->machines.host;
		union perf_event *event;
		pid_t tgid;

		event = malloc(sizeof(event->comm) + machine->id_hdr_size);
		if (event == NULL) {
			err = -ENOMEM;
			goto out_child;
		}

		/*
		 * Some H/W events are generated before COMM event
		 * which is emitted during exec(), so perf script
		 * cannot see a correct process name for those events.
		 * Synthesize COMM event to prevent it.
		 */
		tgid = perf_event__synthesize_comm(tool, event,
						   rec->evlist->workload.pid,
						   process_synthesized_event,
						   machine);
		free(event);

		if (tgid == -1)
			goto out_child;

		event = malloc(sizeof(event->namespaces) +
			       (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
			       machine->id_hdr_size);
		if (event == NULL) {
			err = -ENOMEM;
			goto out_child;
		}

		/*
		 * Synthesize NAMESPACES event for the command specified.
		 */
		perf_event__synthesize_namespaces(tool, event,
						  rec->evlist->workload.pid,
						  tgid, process_synthesized_event,
						  machine);
		free(event);

		perf_evlist__start_workload(rec->evlist);
	}

	if (opts->initial_delay) {
		usleep(opts->initial_delay * USEC_PER_MSEC);
		perf_evlist__enable(rec->evlist);
	}

	trigger_ready(&auxtrace_snapshot_trigger);
	trigger_ready(&switch_output_trigger);
	perf_hooks__invoke_record_start();
	/* Main capture loop: drain buffers until done/draining with no new hits. */
	for (;;) {
		unsigned long long hits = rec->samples;

		/*
		 * rec->evlist->bkw_mmap_state is possible to be
		 * BKW_MMAP_EMPTY here: when done == true and
		 * hits != rec->samples in previous round.
		 *
		 * perf_evlist__toggle_bkw_mmap ensure we never
		 * convert BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
		 */
		if (trigger_is_hit(&switch_output_trigger) || done || draining)
			perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);

		if (record__mmap_read_all(rec) < 0) {
			trigger_error(&auxtrace_snapshot_trigger);
			trigger_error(&switch_output_trigger);
			err = -1;
			goto out_child;
		}

		if (auxtrace_record__snapshot_started) {
			auxtrace_record__snapshot_started = 0;
			if (!trigger_is_error(&auxtrace_snapshot_trigger))
				record__read_auxtrace_snapshot(rec);
			if (trigger_is_error(&auxtrace_snapshot_trigger)) {
				pr_err("AUX area tracing snapshot failed\n");
				err = -1;
				goto out_child;
			}
		}

		if (trigger_is_hit(&switch_output_trigger)) {
			/*
			 * If switch_output_trigger is hit, the data in
			 * overwritable ring buffer should have been collected,
			 * so bkw_mmap_state should be set to BKW_MMAP_EMPTY.
			 *
			 * If SIGUSR2 raise after or during record__mmap_read_all(),
			 * record__mmap_read_all() didn't collect data from
			 * overwritable ring buffer. Read again.
			 */
			if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
				continue;
			trigger_ready(&switch_output_trigger);

			/*
			 * Reenable events in overwrite ring buffer after
			 * record__mmap_read_all(): we should have collected
			 * data from it.
			 */
			perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);

			if (!quiet)
				fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
					waking);
			waking = 0;
			fd = record__switch_output(rec, false);
			if (fd < 0) {
				pr_err("Failed to switch to new file\n");
				trigger_error(&switch_output_trigger);
				err = fd;
				goto out_child;
			}

			/* re-arm the alarm */
			if (rec->switch_output.time)
				alarm(rec->switch_output.time);
		}

		/* No new samples this round: poll, or exit if done/draining. */
		if (hits == rec->samples) {
			if (done || draining)
				break;
			err = perf_evlist__poll(rec->evlist, -1);
			/*
			 * Propagate error, only if there's any. Ignore positive
			 * number of returned events and interrupt error.
			 */
			if (err > 0 || (err < 0 && errno == EINTR))
				err = 0;
			waking++;

			if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
				draining = true;
		}

		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
		if (done && !disabled && !target__none(&opts->target)) {
			trigger_off(&auxtrace_snapshot_trigger);
			perf_evlist__disable(rec->evlist);
			disabled = true;
		}
	}
	trigger_off(&auxtrace_snapshot_trigger);
	trigger_off(&switch_output_trigger);

	if (forks && workload_exec_errno) {
		char msg[STRERR_BUFSIZE];
		const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
		pr_err("Workload failed: %s\n", emsg);
		err = -1;
		goto out_child;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

	if (target__none(&rec->opts.target))
		record__synthesize_workload(rec, true);

out_child:
	/* Flush pending AIO writes before reaping the child / finalizing. */
	record__aio_mmap_read_sync(rec);

	if (forks) {
		int exit_status;

		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&exit_status);

		if (err < 0)
			status = err;
		else if (WIFEXITED(exit_status))
			status = WEXITSTATUS(exit_status);
		else if (WIFSIGNALED(exit_status))
			signr = WTERMSIG(exit_status);
	} else
		status = err;

	record__synthesize(rec, true);
	/* this will be recalculated during process_buildids() */
	rec->samples = 0;

	if (!err) {
		if (!rec->timestamp_filename) {
			record__finish_output(rec);
		} else {
			fd = record__switch_output(rec, true);
			if (fd < 0) {
				status = fd;
				goto out_delete_session;
			}
		}
	}

	perf_hooks__invoke_record_end();

	if (!err && !quiet) {
		char samples[128];
		const char *postfix = rec->timestamp_filename ?
					".<timestamp>" : "";

		if (rec->samples && !rec->opts.full_auxtrace)
			scnprintf(samples, sizeof(samples),
				  " (%" PRIu64 " samples)", rec->samples);
		else
			samples[0] = '\0';

		fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s ]\n",
			perf_data__size(data) / 1024.0 / 1024.0,
			data->file.path, postfix, samples);
	}

out_delete_session:
	perf_session__delete(session);
	return status;
}
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001457
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001458static void callchain_debug(struct callchain_param *callchain)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001459{
Kan Liangaad2b212015-01-05 13:23:04 -05001460 static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
Jiri Olsaa601fdf2014-02-03 12:44:43 +01001461
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001462 pr_debug("callchain: type %s\n", str[callchain->record_mode]);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001463
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001464 if (callchain->record_mode == CALLCHAIN_DWARF)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001465 pr_debug("callchain: stack dump size %d\n",
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001466 callchain->dump_size);
1467}
1468
1469int record_opts__parse_callchain(struct record_opts *record,
1470 struct callchain_param *callchain,
1471 const char *arg, bool unset)
1472{
1473 int ret;
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001474 callchain->enabled = !unset;
1475
1476 /* --no-call-graph */
1477 if (unset) {
1478 callchain->record_mode = CALLCHAIN_NONE;
1479 pr_debug("callchain: disabled\n");
1480 return 0;
1481 }
1482
1483 ret = parse_callchain_record_opt(arg, callchain);
1484 if (!ret) {
1485 /* Enable data address sampling for DWARF unwind. */
1486 if (callchain->record_mode == CALLCHAIN_DWARF)
1487 record->sample_address = true;
1488 callchain_debug(callchain);
1489 }
1490
1491 return ret;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001492}
1493
Kan Liangc421e802015-07-29 05:42:12 -04001494int record_parse_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001495 const char *arg,
1496 int unset)
1497{
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001498 return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
Jiri Olsa26d33022012-08-07 15:20:47 +02001499}
1500
Kan Liangc421e802015-07-29 05:42:12 -04001501int record_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001502 const char *arg __maybe_unused,
1503 int unset __maybe_unused)
1504{
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001505 struct callchain_param *callchain = opt->value;
Kan Liangc421e802015-07-29 05:42:12 -04001506
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001507 callchain->enabled = true;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001508
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001509 if (callchain->record_mode == CALLCHAIN_NONE)
1510 callchain->record_mode = CALLCHAIN_FP;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001511
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001512 callchain_debug(callchain);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001513 return 0;
1514}
1515
Jiri Olsaeb853e82014-02-03 12:44:42 +01001516static int perf_record_config(const char *var, const char *value, void *cb)
1517{
Namhyung Kim7a29c082015-12-15 10:49:56 +09001518 struct record *rec = cb;
1519
1520 if (!strcmp(var, "record.build-id")) {
1521 if (!strcmp(value, "cache"))
1522 rec->no_buildid_cache = false;
1523 else if (!strcmp(value, "no-cache"))
1524 rec->no_buildid_cache = true;
1525 else if (!strcmp(value, "skip"))
1526 rec->no_buildid = true;
1527 else
1528 return -1;
1529 return 0;
1530 }
Yisheng Xiecff17202018-03-12 19:25:57 +08001531 if (!strcmp(var, "record.call-graph")) {
1532 var = "call-graph.record-mode";
1533 return perf_default_config(var, value, cb);
1534 }
Alexey Budankov93f20c02018-11-06 12:07:19 +03001535#ifdef HAVE_AIO_SUPPORT
1536 if (!strcmp(var, "record.aio")) {
1537 rec->opts.nr_cblocks = strtol(value, NULL, 0);
1538 if (!rec->opts.nr_cblocks)
1539 rec->opts.nr_cblocks = nr_cblocks_default;
1540 }
1541#endif
Jiri Olsaeb853e82014-02-03 12:44:42 +01001542
Yisheng Xiecff17202018-03-12 19:25:57 +08001543 return 0;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001544}
1545
/* One entry of the -k/--clockid name -> clockid lookup table. */
struct clockid_map {
	const char *name;
	int clockid;
};

#define CLOCKID_MAP(n, c)	\
	{ .name = n, .clockid = (c), }

/* Table terminator: a NULL name ends the scan in parse_clockid(). */
#define CLOCKID_END	{ .name = NULL, }


/*
 * Add the missing ones, we need to build on many distros...
 */
#ifndef CLOCK_MONOTONIC_RAW
#define CLOCK_MONOTONIC_RAW 4
#endif
#ifndef CLOCK_BOOTTIME
#define CLOCK_BOOTTIME 7
#endif
#ifndef CLOCK_TAI
#define CLOCK_TAI 11
#endif

/* Names accepted by -k/--clockid, including short aliases. */
static const struct clockid_map clockids[] = {
	/* available for all events, NMI safe */
	CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
	CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),

	/* available for some events */
	CLOCKID_MAP("realtime", CLOCK_REALTIME),
	CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
	CLOCKID_MAP("tai", CLOCK_TAI),

	/* available for the lazy */
	CLOCKID_MAP("mono", CLOCK_MONOTONIC),
	CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
	CLOCKID_MAP("real", CLOCK_REALTIME),
	CLOCKID_MAP("boot", CLOCK_BOOTTIME),

	CLOCKID_END,
};
1588
Alexey Budankovcf790512018-10-09 17:36:24 +03001589static int get_clockid_res(clockid_t clk_id, u64 *res_ns)
1590{
1591 struct timespec res;
1592
1593 *res_ns = 0;
1594 if (!clock_getres(clk_id, &res))
1595 *res_ns = res.tv_nsec + res.tv_sec * NSEC_PER_SEC;
1596 else
1597 pr_warning("WARNING: Failed to determine specified clock resolution.\n");
1598
1599 return 0;
1600}
1601
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001602static int parse_clockid(const struct option *opt, const char *str, int unset)
1603{
1604 struct record_opts *opts = (struct record_opts *)opt->value;
1605 const struct clockid_map *cm;
1606 const char *ostr = str;
1607
1608 if (unset) {
1609 opts->use_clockid = 0;
1610 return 0;
1611 }
1612
1613 /* no arg passed */
1614 if (!str)
1615 return 0;
1616
1617 /* no setting it twice */
1618 if (opts->use_clockid)
1619 return -1;
1620
1621 opts->use_clockid = true;
1622
1623 /* if its a number, we're done */
1624 if (sscanf(str, "%d", &opts->clockid) == 1)
Alexey Budankovcf790512018-10-09 17:36:24 +03001625 return get_clockid_res(opts->clockid, &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001626
1627 /* allow a "CLOCK_" prefix to the name */
1628 if (!strncasecmp(str, "CLOCK_", 6))
1629 str += 6;
1630
1631 for (cm = clockids; cm->name; cm++) {
1632 if (!strcasecmp(str, cm->name)) {
1633 opts->clockid = cm->clockid;
Alexey Budankovcf790512018-10-09 17:36:24 +03001634 return get_clockid_res(opts->clockid,
1635 &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001636 }
1637 }
1638
1639 opts->use_clockid = false;
1640 ui__warning("unknown clockid %s, check man page\n", ostr);
1641 return -1;
1642}
1643
Adrian Huntere9db1312015-04-09 18:53:46 +03001644static int record__parse_mmap_pages(const struct option *opt,
1645 const char *str,
1646 int unset __maybe_unused)
1647{
1648 struct record_opts *opts = opt->value;
1649 char *s, *p;
1650 unsigned int mmap_pages;
1651 int ret;
1652
1653 if (!str)
1654 return -EINVAL;
1655
1656 s = strdup(str);
1657 if (!s)
1658 return -ENOMEM;
1659
1660 p = strchr(s, ',');
1661 if (p)
1662 *p = '\0';
1663
1664 if (*s) {
1665 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
1666 if (ret)
1667 goto out_free;
1668 opts->mmap_pages = mmap_pages;
1669 }
1670
1671 if (!p) {
1672 ret = 0;
1673 goto out_free;
1674 }
1675
1676 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
1677 if (ret)
1678 goto out_free;
1679
1680 opts->auxtrace_mmap_pages = mmap_pages;
1681
1682out_free:
1683 free(s);
1684 return ret;
1685}
1686
Jiri Olsa0c582442017-01-09 10:51:59 +01001687static void switch_output_size_warn(struct record *rec)
1688{
1689 u64 wakeup_size = perf_evlist__mmap_size(rec->opts.mmap_pages);
1690 struct switch_output *s = &rec->switch_output;
1691
1692 wakeup_size /= 2;
1693
1694 if (s->size < wakeup_size) {
1695 char buf[100];
1696
1697 unit_number__scnprintf(buf, sizeof(buf), wakeup_size);
1698 pr_warning("WARNING: switch-output data size lower than "
1699 "wakeup kernel buffer size (%s) "
1700 "expect bigger perf.data sizes\n", buf);
1701 }
1702}
1703
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001704static int switch_output_setup(struct record *rec)
1705{
1706 struct switch_output *s = &rec->switch_output;
Jiri Olsadc0c6122017-01-09 10:51:58 +01001707 static struct parse_tag tags_size[] = {
1708 { .tag = 'B', .mult = 1 },
1709 { .tag = 'K', .mult = 1 << 10 },
1710 { .tag = 'M', .mult = 1 << 20 },
1711 { .tag = 'G', .mult = 1 << 30 },
1712 { .tag = 0 },
1713 };
Jiri Olsabfacbe32017-01-09 10:52:00 +01001714 static struct parse_tag tags_time[] = {
1715 { .tag = 's', .mult = 1 },
1716 { .tag = 'm', .mult = 60 },
1717 { .tag = 'h', .mult = 60*60 },
1718 { .tag = 'd', .mult = 60*60*24 },
1719 { .tag = 0 },
1720 };
Jiri Olsadc0c6122017-01-09 10:51:58 +01001721 unsigned long val;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001722
1723 if (!s->set)
1724 return 0;
1725
1726 if (!strcmp(s->str, "signal")) {
1727 s->signal = true;
1728 pr_debug("switch-output with SIGUSR2 signal\n");
Jiri Olsadc0c6122017-01-09 10:51:58 +01001729 goto enabled;
1730 }
1731
1732 val = parse_tag_value(s->str, tags_size);
1733 if (val != (unsigned long) -1) {
1734 s->size = val;
1735 pr_debug("switch-output with %s size threshold\n", s->str);
1736 goto enabled;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001737 }
1738
Jiri Olsabfacbe32017-01-09 10:52:00 +01001739 val = parse_tag_value(s->str, tags_time);
1740 if (val != (unsigned long) -1) {
1741 s->time = val;
1742 pr_debug("switch-output with %s time threshold (%lu seconds)\n",
1743 s->str, s->time);
1744 goto enabled;
1745 }
1746
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001747 return -1;
Jiri Olsadc0c6122017-01-09 10:51:58 +01001748
1749enabled:
1750 rec->timestamp_filename = true;
1751 s->enabled = true;
Jiri Olsa0c582442017-01-09 10:51:59 +01001752
1753 if (s->size && !rec->opts.no_buffering)
1754 switch_output_size_warn(rec);
1755
Jiri Olsadc0c6122017-01-09 10:51:58 +01001756 return 0;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001757}
1758
/* Usage strings printed by "perf record -h" and on option errors. */
static const char * const __record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};
/* Non-static alias: shared with other builtins (see comment below on record). */
const char * const *record_usage = __record_usage;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001765
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001766/*
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001767 * XXX Ideally would be local to cmd_record() and passed to a record__new
1768 * because we need to have access to it in record__exit, that is called
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001769 * after cmd_record() exits, but since record_options need to be accessible to
1770 * builtin-script, leave it here.
1771 *
 1772 * At least we don't touch it in all the other functions here directly.
1773 *
1774 * Just say no to tons of global variables, sigh.
1775 */
/* Global record state; see the comment above for why it is not local. */
static struct record record = {
	.opts = {
		.sample_time	     = true,
		/* UINT_MAX/ULLONG_MAX mean "not set by the user". */
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
			.default_per_cpu = true,
		},
	},
	/* Event handlers used when processing build-ids at session end. */
	.tool = {
		.sample		= process_sample_event,
		.fork		= perf_event__process_fork,
		.exit		= perf_event__process_exit,
		.comm		= perf_event__process_comm,
		.namespaces	= perf_event__process_namespaces,
		.mmap		= perf_event__process_mmap,
		.mmap2		= perf_event__process_mmap2,
		.ordered_events	= true,
	},
};
Frederic Weisbecker7865e812010-04-14 19:42:07 +02001799
/* --call-graph help text: common help plus perf record's default mode. */
const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
	"\n\t\t\t\tDefault: fp";

/* Set by --dry-run: parse options, then exit without recording. */
static bool dry_run;
1804
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001805/*
1806 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
1807 * with it and switch to use the library functions in perf_evlist that came
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03001808 * from builtin-record.c, i.e. use record_opts,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001809 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
1810 * using pipes, etc.
1811 */
/*
 * Option table for perf record.  Most entries write straight into the
 * global 'record' above; callbacks handle the multi-valued options.
 * Kept file-global (and aliased below) so builtin-script can reuse it —
 * see the XXX comment above 'record'.
 */
static struct option __record_options[] = {
	OPT_CALLBACK('e', "event", &record.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
		     "event filter", parse_filter),
	OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
			   NULL, "don't record events from perf itself",
			   exclude_perf),
	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
		    "record events on existing process id"),
	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
		    "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
		    "collect data without buffering"),
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
			    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.data.file.path, "file",
		    "output file name"),
	OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
			&record.opts.no_inherit_set,
			"child tasks do not inherit counters"),
	OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
		    "synthesize non-sample events at the end of output"),
	OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
	OPT_BOOLEAN(0, "bpf-event", &record.opts.bpf_event, "record bpf events"),
	OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
		    "Fail if the specified frequency can't be used"),
	OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
		     "profile at this frequency",
		     record__parse_freq),
	OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
		     "number of mmap data pages and AUX area tracing mmap pages",
		     record__parse_mmap_pages),
	OPT_BOOLEAN(0, "group", &record.opts.group,
		    "put the counters into a counter group"),
	/* Callchain options: bare -g vs parameterized --call-graph. */
	OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
			   NULL, "enables call-graph recording" ,
			   &record_callchain_opt),
	OPT_CALLBACK(0, "call-graph", &record.opts,
		     "record_mode[,record_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
	OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
		    "Record the sample physical addresses"),
	OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
	OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
			&record.opts.sample_time_set,
			"Record the sample timestamps"),
	OPT_BOOLEAN_SET('P', "period", &record.opts.period, &record.opts.period_set,
			"Record the sample period"),
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
		    "don't sample"),
	OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
			&record.no_buildid_cache_set,
			"do not update the buildid cache"),
	OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
			&record.no_buildid_set,
			"do not collect buildids in perf.data"),
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
		  "ms to wait before starting measurement after program start"),
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
		   "user to profile"),

	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
			   "branch any", "sample any taken branches",
			   parse_branch_stack),

	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
		    "sample by weight (on special events only)"),
	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
		    "sample transaction flags (special events only)"),
	OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
		    "use per-thread mmaps"),
	OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
		    "sample selected machine registers on interrupt,"
		    " use -I ? to list register names", parse_regs),
	OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
		    "sample selected machine registers on interrupt,"
		    " use -I ? to list register names", parse_regs),
	OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
		    "Record running/enabled time of read (:S) events"),
	OPT_CALLBACK('k', "clockid", &record.opts,
	"clockid", "clockid to use for events, see clock_gettime()",
	parse_clockid),
	OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
			  "opts", "AUX area tracing Snapshot Mode", ""),
	OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
			"per thread proc mmap processing timeout in ms"),
	OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
		    "Record namespaces events"),
	OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
		    "Record context switch events"),
	OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
			 "Configure all used events to run in kernel space.",
			 PARSE_OPT_EXCLUSIVE),
	OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
			 "Configure all used events to run in user space.",
			 PARSE_OPT_EXCLUSIVE),
	OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
		   "clang binary to use for compiling BPF scriptlets"),
	OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
		   "options passed to clang when compiling BPF scriptlets"),
	OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
		    "Record build-id of all DSOs regardless of hits"),
	OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
		    "append timestamp to output filename"),
	OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
		    "Record timestamp boundary (time of first/last samples)"),
	OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
			  &record.switch_output.set, "signal,size,time",
			  "Switch output when receive SIGUSR2 or cross size,time threshold",
			  "signal"),
	OPT_BOOLEAN(0, "dry-run", &dry_run,
		    "Parse options then exit"),
#ifdef HAVE_AIO_SUPPORT
	OPT_CALLBACK_OPTARG(0, "aio", &record.opts,
		     &nr_cblocks_default, "n", "Use <n> control blocks in asynchronous trace writing mode (default: 1, max: 4)",
		     record__aio_parse),
#endif
	OPT_END()
};
1954
/* Non-static alias so builtin-script can reuse perf record's option table. */
struct option *record_options = __record_options;
1956
Arnaldo Carvalho de Melob0ad8ea2017-03-27 11:47:20 -03001957int cmd_record(int argc, const char **argv)
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001958{
Adrian Hunteref149c22015-04-09 18:53:45 +03001959 int err;
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001960 struct record *rec = &record;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001961 char errbuf[BUFSIZ];
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001962
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03001963 setlocale(LC_ALL, "");
1964
Wang Nan48e1cab2015-12-14 10:39:22 +00001965#ifndef HAVE_LIBBPF_SUPPORT
1966# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
1967 set_nobuild('\0', "clang-path", true);
1968 set_nobuild('\0', "clang-opt", true);
1969# undef set_nobuild
1970#endif
1971
He Kuang7efe0e02015-12-14 10:39:23 +00001972#ifndef HAVE_BPF_PROLOGUE
1973# if !defined (HAVE_DWARF_SUPPORT)
1974# define REASON "NO_DWARF=1"
1975# elif !defined (HAVE_LIBBPF_SUPPORT)
1976# define REASON "NO_LIBBPF=1"
1977# else
1978# define REASON "this architecture doesn't support BPF prologue"
1979# endif
1980# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
1981 set_nobuild('\0', "vmlinux", true);
1982# undef set_nobuild
1983# undef REASON
1984#endif
1985
Alexey Budankov9d2ed642019-01-22 20:47:43 +03001986 CPU_ZERO(&rec->affinity_mask);
1987 rec->opts.affinity = PERF_AFFINITY_SYS;
1988
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001989 rec->evlist = perf_evlist__new();
1990 if (rec->evlist == NULL)
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -02001991 return -ENOMEM;
1992
Arnaldo Carvalho de Meloecc4c562017-01-24 13:44:10 -03001993 err = perf_config(perf_record_config, rec);
1994 if (err)
1995 return err;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001996
Tom Zanussibca647a2010-11-10 08:11:30 -06001997 argc = parse_options(argc, argv, record_options, record_usage,
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02001998 PARSE_OPT_STOP_AT_NON_OPTION);
Namhyung Kim68ba3232017-02-17 17:17:42 +09001999 if (quiet)
2000 perf_quiet_option();
Jiri Olsa483635a2017-02-17 18:00:18 +01002001
2002 /* Make system wide (-a) the default target. */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002003 if (!argc && target__none(&rec->opts.target))
Jiri Olsa483635a2017-02-17 18:00:18 +01002004 rec->opts.target.system_wide = true;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002005
Namhyung Kimbea03402012-04-26 14:15:15 +09002006 if (nr_cgroups && !rec->opts.target.system_wide) {
Namhyung Kimc7118362015-10-25 00:49:27 +09002007 usage_with_options_msg(record_usage, record_options,
2008 "cgroup monitoring only available in system-wide mode");
2009
Stephane Eranian023695d2011-02-14 11:20:01 +02002010 }
Adrian Hunterb757bb02015-07-21 12:44:04 +03002011 if (rec->opts.record_switch_events &&
2012 !perf_can_record_switch_events()) {
Namhyung Kimc7118362015-10-25 00:49:27 +09002013 ui__error("kernel does not support recording context switch events\n");
2014 parse_options_usage(record_usage, record_options, "switch-events", 0);
2015 return -EINVAL;
Adrian Hunterb757bb02015-07-21 12:44:04 +03002016 }
Stephane Eranian023695d2011-02-14 11:20:01 +02002017
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002018 if (switch_output_setup(rec)) {
2019 parse_options_usage(record_usage, record_options, "switch-output", 0);
2020 return -EINVAL;
2021 }
2022
Jiri Olsabfacbe32017-01-09 10:52:00 +01002023 if (rec->switch_output.time) {
2024 signal(SIGALRM, alarm_sig_handler);
2025 alarm(rec->switch_output.time);
2026 }
2027
Adrian Hunter1b36c032016-09-23 17:38:39 +03002028 /*
2029 * Allow aliases to facilitate the lookup of symbols for address
2030 * filters. Refer to auxtrace_parse_filters().
2031 */
2032 symbol_conf.allow_aliases = true;
2033
2034 symbol__init(NULL);
2035
Adrian Hunter4b5ea3b2018-03-06 11:13:12 +02002036 err = record__auxtrace_init(rec);
Adrian Hunter1b36c032016-09-23 17:38:39 +03002037 if (err)
2038 goto out;
2039
Wang Nan0aab2132016-06-16 08:02:41 +00002040 if (dry_run)
Adrian Hunter5c01ad602016-09-23 17:38:37 +03002041 goto out;
Wang Nan0aab2132016-06-16 08:02:41 +00002042
Wang Nand7888572016-04-08 15:07:24 +00002043 err = bpf__setup_stdout(rec->evlist);
2044 if (err) {
2045 bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
2046 pr_err("ERROR: Setup BPF stdout failed: %s\n",
2047 errbuf);
Adrian Hunter5c01ad602016-09-23 17:38:37 +03002048 goto out;
Wang Nand7888572016-04-08 15:07:24 +00002049 }
2050
Adrian Hunteref149c22015-04-09 18:53:45 +03002051 err = -ENOMEM;
2052
Arnaldo Carvalho de Melo6c443952017-11-14 11:03:19 -03002053 if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(rec->evlist))
Arnaldo Carvalho de Melo646aaea2011-05-27 11:00:41 -03002054 pr_warning(
2055"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
2056"check /proc/sys/kernel/kptr_restrict.\n\n"
2057"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
2058"file is not found in the buildid cache or in the vmlinux path.\n\n"
2059"Samples in kernel modules won't be resolved at all.\n\n"
2060"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
2061"even with a suitable vmlinux or kallsyms file.\n\n");
Arnaldo Carvalho de Meloec80fde2011-05-26 09:53:51 -03002062
Wang Nan0c1d46a2016-04-20 18:59:52 +00002063 if (rec->no_buildid_cache || rec->no_buildid) {
Stephane Eraniana1ac1d32010-06-17 11:39:01 +02002064 disable_buildid_cache();
Jiri Olsadc0c6122017-01-09 10:51:58 +01002065 } else if (rec->switch_output.enabled) {
Wang Nan0c1d46a2016-04-20 18:59:52 +00002066 /*
2067 * In 'perf record --switch-output', disable buildid
2068 * generation by default to reduce data file switching
2069 * overhead. Still generate buildid if they are required
2070 * explicitly using
2071 *
Jiri Olsa60437ac2017-01-03 09:19:56 +01002072 * perf record --switch-output --no-no-buildid \
Wang Nan0c1d46a2016-04-20 18:59:52 +00002073 * --no-no-buildid-cache
2074 *
2075 * Following code equals to:
2076 *
2077 * if ((rec->no_buildid || !rec->no_buildid_set) &&
2078 * (rec->no_buildid_cache || !rec->no_buildid_cache_set))
2079 * disable_buildid_cache();
2080 */
2081 bool disable = true;
2082
2083 if (rec->no_buildid_set && !rec->no_buildid)
2084 disable = false;
2085 if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
2086 disable = false;
2087 if (disable) {
2088 rec->no_buildid = true;
2089 rec->no_buildid_cache = true;
2090 disable_buildid_cache();
2091 }
2092 }
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02002093
Wang Nan4ea648a2016-07-14 08:34:47 +00002094 if (record.opts.overwrite)
2095 record.opts.tail_synthesize = true;
2096
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03002097 if (rec->evlist->nr_entries == 0 &&
Arnaldo Carvalho de Melo4b4cd502017-07-03 13:26:32 -03002098 __perf_evlist__add_default(rec->evlist, !record.opts.no_samples) < 0) {
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02002099 pr_err("Not enough memory for event selector list\n");
Adrian Hunter394c01e2016-09-23 17:38:36 +03002100 goto out;
Peter Zijlstrabbd36e52009-06-11 23:11:50 +02002101 }
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002102
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02002103 if (rec->opts.target.tid && !rec->opts.no_inherit_set)
2104 rec->opts.no_inherit = true;
2105
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002106 err = target__validate(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002107 if (err) {
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002108 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Jiri Olsac3dec272018-02-06 19:17:58 +01002109 ui__warning("%s\n", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002110 }
Namhyung Kim4bd0f2d2012-04-26 14:15:18 +09002111
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002112 err = target__parse_uid(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002113 if (err) {
2114 int saved_errno = errno;
2115
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002116 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Namhyung Kim3780f482012-05-29 13:22:57 +09002117 ui__error("%s", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002118
2119 err = -saved_errno;
Adrian Hunter394c01e2016-09-23 17:38:36 +03002120 goto out;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002121 }
Arnaldo Carvalho de Melo0d37aa32012-01-19 14:08:15 -02002122
Mengting Zhangca800062017-12-13 15:01:53 +08002123 /* Enable ignoring missing threads when -u/-p option is defined. */
2124 rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;
Jiri Olsa23dc4f12016-12-12 11:35:43 +01002125
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002126 err = -ENOMEM;
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03002127 if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
Arnaldo Carvalho de Melodd7927f2011-01-12 14:28:51 -02002128 usage_with_options(record_usage, record_options);
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02002129
Adrian Hunteref149c22015-04-09 18:53:45 +03002130 err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
2131 if (err)
Adrian Hunter394c01e2016-09-23 17:38:36 +03002132 goto out;
Adrian Hunteref149c22015-04-09 18:53:45 +03002133
Namhyung Kim61566812016-01-11 22:37:09 +09002134 /*
2135 * We take all buildids when the file contains
2136 * AUX area tracing data because we do not decode the
2137 * trace because it would take too long.
2138 */
2139 if (rec->opts.full_auxtrace)
2140 rec->buildid_all = true;
2141
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03002142 if (record_opts__config(&rec->opts)) {
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03002143 err = -EINVAL;
Adrian Hunter394c01e2016-09-23 17:38:36 +03002144 goto out;
Mike Galbraith7e4ff9e2009-10-12 07:56:03 +02002145 }
2146
Alexey Budankov93f20c02018-11-06 12:07:19 +03002147 if (rec->opts.nr_cblocks > nr_cblocks_max)
2148 rec->opts.nr_cblocks = nr_cblocks_max;
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002149 if (verbose > 0)
2150 pr_info("nr_cblocks: %d\n", rec->opts.nr_cblocks);
2151
Alexey Budankov9d2ed642019-01-22 20:47:43 +03002152 pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
2153
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002154 err = __cmd_record(&record, argc, argv);
Adrian Hunter394c01e2016-09-23 17:38:36 +03002155out:
Namhyung Kim45604712014-05-12 09:47:24 +09002156 perf_evlist__delete(rec->evlist);
Arnaldo Carvalho de Melod65a4582010-07-30 18:31:28 -03002157 symbol__exit();
Adrian Hunteref149c22015-04-09 18:53:45 +03002158 auxtrace_record__free(rec->itr);
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03002159 return err;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002160}
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002161
2162static void snapshot_sig_handler(int sig __maybe_unused)
2163{
Jiri Olsadc0c6122017-01-09 10:51:58 +01002164 struct record *rec = &record;
2165
Wang Nan5f9cf592016-04-20 18:59:49 +00002166 if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
2167 trigger_hit(&auxtrace_snapshot_trigger);
2168 auxtrace_record__snapshot_started = 1;
2169 if (auxtrace_record__snapshot_start(record.itr))
2170 trigger_error(&auxtrace_snapshot_trigger);
2171 }
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002172
Jiri Olsadc0c6122017-01-09 10:51:58 +01002173 if (switch_output_signal(rec))
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002174 trigger_hit(&switch_output_trigger);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002175}
Jiri Olsabfacbe32017-01-09 10:52:00 +01002176
2177static void alarm_sig_handler(int sig __maybe_unused)
2178{
2179 struct record *rec = &record;
2180
2181 if (switch_output_time(rec))
2182 trigger_hit(&switch_output_trigger);
2183}