blob: 88ea11d57c6f1a8103bd9728f8de1608b811ab45 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Ingo Molnarabaff322009-06-02 22:59:57 +02002/*
Ingo Molnarbf9e1872009-06-02 23:37:05 +02003 * builtin-record.c
4 *
5 * Builtin record command: Record the profile of a workload
6 * (or a CPU, or a PID) into the perf.data output file - for
7 * later analysis via perf report.
Ingo Molnarabaff322009-06-02 22:59:57 +02008 */
Ingo Molnar16f762a2009-05-27 09:10:38 +02009#include "builtin.h"
Ingo Molnarbf9e1872009-06-02 23:37:05 +020010
11#include "perf.h"
12
Arnaldo Carvalho de Melo6122e4e2010-02-03 16:52:05 -020013#include "util/build-id.h"
Thomas Gleixner6eda5832009-05-01 18:29:57 +020014#include "util/util.h"
Josh Poimboeuf4b6ab942015-12-15 09:39:39 -060015#include <subcmd/parse-options.h>
Ingo Molnar8ad8db32009-05-26 11:10:09 +020016#include "util/parse-events.h"
Taeung Song41840d22016-06-23 17:55:17 +090017#include "util/config.h"
Thomas Gleixner6eda5832009-05-01 18:29:57 +020018
Arnaldo Carvalho de Melo8f651ea2014-10-09 16:12:24 -030019#include "util/callchain.h"
Arnaldo Carvalho de Melof14d5702014-10-17 12:17:40 -030020#include "util/cgroup.h"
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020021#include "util/header.h"
Frederic Weisbecker66e274f2009-08-12 11:07:25 +020022#include "util/event.h"
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -020023#include "util/evlist.h"
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -020024#include "util/evsel.h"
Frederic Weisbecker8f288272009-08-16 22:05:48 +020025#include "util/debug.h"
Mathieu Poirier5d8bb1e2016-09-16 09:50:03 -060026#include "util/drv_configs.h"
Arnaldo Carvalho de Melo94c744b2009-12-11 21:24:02 -020027#include "util/session.h"
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -020028#include "util/tool.h"
Arnaldo Carvalho de Melo8d063672009-11-04 18:50:43 -020029#include "util/symbol.h"
Paul Mackerrasa12b51c2010-03-10 20:36:09 +110030#include "util/cpumap.h"
Arnaldo Carvalho de Melofd782602011-01-18 15:15:24 -020031#include "util/thread_map.h"
Jiri Olsaf5fc14122013-10-15 16:27:32 +020032#include "util/data.h"
Stephane Eranianbcc84ec2015-08-31 18:41:12 +020033#include "util/perf_regs.h"
Adrian Hunteref149c22015-04-09 18:53:45 +030034#include "util/auxtrace.h"
Adrian Hunter46bc29b2016-03-08 10:38:44 +020035#include "util/tsc.h"
Andi Kleenf00898f2015-05-27 10:51:51 -070036#include "util/parse-branch-options.h"
Stephane Eranianbcc84ec2015-08-31 18:41:12 +020037#include "util/parse-regs-options.h"
Wang Nan71dc23262015-10-14 12:41:19 +000038#include "util/llvm-utils.h"
Wang Nan8690a2a2016-02-22 09:10:32 +000039#include "util/bpf-loader.h"
Wang Nan5f9cf592016-04-20 18:59:49 +000040#include "util/trigger.h"
Wang Nana0748652016-11-26 07:03:28 +000041#include "util/perf-hooks.h"
Arnaldo Carvalho de Meloc5e40272017-04-19 16:12:39 -030042#include "util/time-utils.h"
Arnaldo Carvalho de Melo58db1d62017-04-19 16:05:56 -030043#include "util/units.h"
Song Liu7b612e22019-01-17 08:15:19 -080044#include "util/bpf-event.h"
Wang Nand8871ea2016-02-26 09:32:06 +000045#include "asm/bug.h"
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020046
Arnaldo Carvalho de Meloa43783a2017-04-18 10:46:11 -030047#include <errno.h>
Arnaldo Carvalho de Melofd20e812017-04-17 15:23:08 -030048#include <inttypes.h>
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -030049#include <locale.h>
Arnaldo Carvalho de Melo42087352017-04-19 19:06:30 -030050#include <poll.h>
Peter Zijlstra97124d5e2009-06-02 15:52:24 +020051#include <unistd.h>
Peter Zijlstrade9ac072009-04-08 15:01:31 +020052#include <sched.h>
Arnaldo Carvalho de Melo9607ad32017-04-19 15:49:18 -030053#include <signal.h>
Arnaldo Carvalho de Meloa41794c2010-05-18 18:29:23 -030054#include <sys/mman.h>
Arnaldo Carvalho de Melo42087352017-04-19 19:06:30 -030055#include <sys/wait.h>
Arnaldo Carvalho de Melo0693e682016-08-08 15:05:46 -030056#include <linux/time64.h>
Bernhard Rosenkraenzer78da39f2012-10-08 09:43:26 +030057
/*
 * Settings for switching perf.data output to a new file mid-record
 * (the --switch-output option). Consulted by switch_output_signal(),
 * switch_output_size() and switch_output_time() below.
 */
struct switch_output {
	bool		enabled;	/* some switch-output mode is active */
	bool		signal;		/* switch output on signal delivery */
	unsigned long	size;		/* switch after this many bytes written */
	unsigned long	time;		/* time-based switching period -- units handled by callers, presumably seconds; confirm against option parsing */
	const char	*str;		/* raw option argument string */
	bool		set;		/* option was explicitly given on the command line */
};
66
/*
 * Per-session state for 'perf record': tool callbacks, parsed options,
 * the output data file, event list and write accounting.
 */
struct record {
	struct perf_tool	tool;		/* callbacks (sample, etc.) registered with the session */
	struct record_opts	opts;		/* parsed command-line options */
	u64			bytes_written;	/* total payload written, drives switch_output_size() */
	struct perf_data	data;		/* perf.data output descriptor */
	struct auxtrace_record	*itr;		/* AUX area (e.g. Intel PT) recording context, may be NULL */
	struct perf_evlist	*evlist;	/* events being recorded */
	struct perf_session	*session;	/* session owning the output file */
	int			realtime_prio;	/* SCHED_FIFO priority -- used outside this view */
	bool			no_buildid;		/* skip build-id processing */
	bool			no_buildid_set;		/* no_buildid came from the command line */
	bool			no_buildid_cache;	/* skip caching of build-ids */
	bool			no_buildid_cache_set;	/* no_buildid_cache came from the command line */
	bool			buildid_all;		/* mark all DSOs regardless of sample hits */
	bool			timestamp_filename;	/* timestamp in output file name -- used outside this view */
	bool			timestamp_boundary;	/* record first/last sample times, see process_sample_event() */
	struct switch_output	switch_output;	/* multi-file output switching config */
	unsigned long long	samples;	/* count of samples/pushed chunks seen */
};
Ingo Molnara21ca2c2009-06-06 09:58:57 +020086
/* Set from signal context when an AUX area snapshot has been requested. */
static volatile int auxtrace_record__snapshot_started;
/* Trigger driving AUX area snapshot collection (see record__read_auxtrace_snapshot()). */
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
/* Trigger driving output-file switching (hit from record__write()/record__aio_pushfn()). */
static DEFINE_TRIGGER(switch_output_trigger);
90
91static bool switch_output_signal(struct record *rec)
92{
93 return rec->switch_output.signal &&
94 trigger_is_ready(&switch_output_trigger);
95}
96
97static bool switch_output_size(struct record *rec)
98{
99 return rec->switch_output.size &&
100 trigger_is_ready(&switch_output_trigger) &&
101 (rec->bytes_written >= rec->switch_output.size);
102}
103
Jiri Olsabfacbe32017-01-09 10:52:00 +0100104static bool switch_output_time(struct record *rec)
105{
106 return rec->switch_output.time &&
107 trigger_is_ready(&switch_output_trigger);
108}
109
/*
 * Synchronously append @size bytes at @bf to the session's output file,
 * update the write accounting and arm the output-switch trigger when the
 * size threshold is crossed.
 *
 * Returns 0 on success, -1 on write failure. @map is unused here (the
 * AIO push path uses it); kept for signature parity with callers.
 */
static int record__write(struct record *rec, struct perf_mmap *map __maybe_unused,
			 void *bf, size_t size)
{
	struct perf_data_file *file = &rec->session->data->file;

	if (perf_data_file__write(file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	rec->bytes_written += size;

	/* bytes_written must be updated first: switch_output_size() reads it */
	if (switch_output_size(rec))
		trigger_hit(&switch_output_trigger);

	return 0;
}
127
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300128#ifdef HAVE_AIO_SUPPORT
/*
 * Queue one POSIX AIO write of @size bytes from @buf to @trace_fd at
 * file offset @off, retrying while the queue is full (EAGAIN).
 *
 * On a hard error the control block's aio_fildes is set to -1, which
 * marks it free/invalid for record__aio_sync().
 *
 * Returns 0 if the request was queued, -1 on error (see aio_write(3)).
 */
static int record__aio_write(struct aiocb *cblock, int trace_fd,
		void *buf, size_t size, off_t off)
{
	int rc;

	cblock->aio_fildes = trace_fd;
	cblock->aio_buf = buf;
	cblock->aio_nbytes = size;
	cblock->aio_offset = off;
	/* completion is detected by polling aio_error(), not by signal */
	cblock->aio_sigevent.sigev_notify = SIGEV_NONE;

	do {
		rc = aio_write(cblock);
		if (rc == 0) {
			break;
		} else if (errno != EAGAIN) {
			cblock->aio_fildes = -1;
			pr_err("failed to queue perf data, error: %m\n");
			break;
		}
		/* EAGAIN: AIO queue full, spin until a slot frees up */
	} while (1);

	return rc;
}
153
/*
 * Check a single in-flight AIO write for completion.
 *
 * Returns 0 if the request is still in progress (or was restarted for a
 * short write), 1 if it fully completed and its control block was freed.
 */
static int record__aio_complete(struct perf_mmap *md, struct aiocb *cblock)
{
	void *rem_buf;
	off_t rem_off;
	size_t rem_size;
	int rc, aio_errno;
	ssize_t aio_ret, written;

	aio_errno = aio_error(cblock);
	if (aio_errno == EINPROGRESS)
		return 0;

	written = aio_ret = aio_return(cblock);
	if (aio_ret < 0) {
		if (aio_errno != EINTR)
			pr_err("failed to write perf data, error: %m\n");
		/* treat the failed attempt as zero bytes written and retry below */
		written = 0;
	}

	rem_size = cblock->aio_nbytes - written;

	if (rem_size == 0) {
		/* mark the control block as free for reuse */
		cblock->aio_fildes = -1;
		/*
		 * md->refcount is incremented in perf_mmap__push() for
		 * every enqueued aio write request so decrement it because
		 * the request is now complete.
		 */
		perf_mmap__put(md);
		rc = 1;
	} else {
		/*
		 * aio write request may require restart with the
		 * remainder if the kernel didn't write the whole
		 * chunk at once.
		 */
		rem_off = cblock->aio_offset + written;
		rem_buf = (void *)(cblock->aio_buf + written);
		record__aio_write(cblock, cblock->aio_fildes,
				rem_buf, rem_size, rem_off);
		rc = 0;
	}

	return rc;
}
199
/*
 * Wait for AIO writes on @md to complete.
 *
 * With @sync_all false: return the index of the first control block that
 * is free (never queued, or just completed) so the caller can reuse it.
 * With @sync_all true: keep suspending until every request has completed,
 * then return -1.
 */
static int record__aio_sync(struct perf_mmap *md, bool sync_all)
{
	struct aiocb **aiocb = md->aio.aiocb;
	struct aiocb *cblocks = md->aio.cblocks;
	struct timespec timeout = { 0, 1000 * 1000 * 1 }; /* 1ms */
	int i, do_suspend;

	do {
		do_suspend = 0;
		for (i = 0; i < md->aio.nr_cblocks; ++i) {
			/* aio_fildes == -1 marks a free control block */
			if (cblocks[i].aio_fildes == -1 || record__aio_complete(md, &cblocks[i])) {
				if (sync_all)
					aiocb[i] = NULL;
				else
					return i;
			} else {
				/*
				 * Started aio write is not complete yet
				 * so it has to be waited before the
				 * next allocation.
				 */
				aiocb[i] = &cblocks[i];
				do_suspend = 1;
			}
		}
		/* nothing left in flight: all done (only reachable when sync_all) */
		if (!do_suspend)
			return -1;

		/* block (up to 1ms at a time) until some request makes progress */
		while (aio_suspend((const struct aiocb **)aiocb, md->aio.nr_cblocks, &timeout)) {
			if (!(errno == EAGAIN || errno == EINTR))
				pr_err("failed to sync perf data, error: %m\n");
		}
	} while (1);
}
234
/*
 * perf_mmap__aio_push() callback: queue one mmap chunk for asynchronous
 * write to the trace file. Mirrors record__write()'s accounting (byte
 * count and output-switch trigger) on successful queueing.
 *
 * Returns 0 if the write was queued, non-zero on failure.
 */
static int record__aio_pushfn(void *to, struct aiocb *cblock, void *bf, size_t size, off_t off)
{
	struct record *rec = to;
	int ret, trace_fd = rec->session->data->file.fd;

	rec->samples++;

	ret = record__aio_write(cblock, trace_fd, bf, size, off);
	if (!ret) {
		/* count bytes as written once the request is queued */
		rec->bytes_written += size;
		if (switch_output_size(rec))
			trigger_hit(&switch_output_trigger);
	}

	return ret;
}
251
/* Return the current file position of @trace_fd (used as the base offset
 * for positioned AIO writes), or (off_t)-1 on lseek failure. */
static off_t record__aio_get_pos(int trace_fd)
{
	off_t current = lseek(trace_fd, 0, SEEK_CUR);

	return current;
}
256
/* Rewind/advance @trace_fd to absolute offset @pos (undo speculative
 * advancement after a failed AIO push). Errors are deliberately ignored. */
static void record__aio_set_pos(int trace_fd, off_t pos)
{
	(void)lseek(trace_fd, pos, SEEK_SET);
}
261
/*
 * Drain all outstanding AIO writes for every mapped ring buffer.
 * No-op when AIO is disabled (nr_cblocks == 0).
 */
static void record__aio_mmap_read_sync(struct record *rec)
{
	int i;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_mmap *maps = evlist->mmap;

	if (!rec->opts.nr_cblocks)
		return;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		struct perf_mmap *map = &maps[i];

		if (map->base)
			record__aio_sync(map, true);
	}
}
278
/* Default and upper bound for --aio control blocks per mmap; the clamp to
 * nr_cblocks_max is applied by option handling outside this view. */
static int nr_cblocks_default = 1;
static int nr_cblocks_max = 4;
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300281
/*
 * Option callback for --aio[=n]: store the requested number of AIO
 * control blocks in record_opts, falling back to the default when no
 * (or an unparsable) count is given.
 *
 * NOTE(review): strtol errors are not checked, so non-numeric input
 * parses as 0 and silently becomes nr_cblocks_default -- presumably
 * intentional leniency; confirm against the option's documentation.
 */
static int record__aio_parse(const struct option *opt,
			     const char *str,
			     int unset)
{
	struct record_opts *opts = (struct record_opts *)opt->value;

	if (unset) {
		opts->nr_cblocks = 0;	/* --no-aio: disable AIO entirely */
	} else {
		if (str)
			opts->nr_cblocks = strtol(str, NULL, 0);
		if (!opts->nr_cblocks)
			opts->nr_cblocks = nr_cblocks_default;
	}

	return 0;
}
299#else /* HAVE_AIO_SUPPORT */
/*
 * Stubs used when perf is built without POSIX AIO support. With
 * nr_cblocks_max == 0 the AIO path can never be enabled, so none of
 * these should ever run in anger; they return failure/no-op values.
 */
static int nr_cblocks_max = 0;

static int record__aio_sync(struct perf_mmap *md __maybe_unused, bool sync_all __maybe_unused)
{
	return -1;
}

static int record__aio_pushfn(void *to __maybe_unused, struct aiocb *cblock __maybe_unused,
		void *bf __maybe_unused, size_t size __maybe_unused, off_t off __maybe_unused)
{
	return -1;
}

static off_t record__aio_get_pos(int trace_fd __maybe_unused)
{
	return -1;
}

static void record__aio_set_pos(int trace_fd __maybe_unused, off_t pos __maybe_unused)
{
}

static void record__aio_mmap_read_sync(struct record *rec __maybe_unused)
{
}
325#endif
326
327static int record__aio_enabled(struct record *rec)
328{
329 return rec->opts.nr_cblocks > 0;
330}
331
/*
 * perf_tool callback for events perf synthesizes itself (mmap, comm,
 * ...): write them straight to the output file. sample/machine are not
 * needed for raw pass-through.
 */
static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, NULL, event, event->header.size);
}
340
Jiri Olsaded2b8f2018-09-13 14:54:06 +0200341static int record__pushfn(struct perf_mmap *map, void *to, void *bf, size_t size)
Arnaldo Carvalho de Melod37f1582017-10-05 16:39:55 -0300342{
343 struct record *rec = to;
344
345 rec->samples++;
Jiri Olsaded2b8f2018-09-13 14:54:06 +0200346 return record__write(rec, map, bf, size);
Arnaldo Carvalho de Melod37f1582017-10-05 16:39:55 -0300347}
348
/* Set from signal context; polled by the record main loop (outside this view). */
static volatile int done;		/* request to stop recording */
static volatile int signr = -1;		/* signal to re-raise at exit, -1 = none */
static volatile int child_finished;	/* SIGCHLD observed for the workload */
Wang Nanc0bdc1c2016-04-13 08:21:06 +0000352
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300353static void sig_handler(int sig)
354{
355 if (sig == SIGCHLD)
356 child_finished = 1;
357 else
358 signr = sig;
359
360 done = 1;
361}
362
/*
 * SIGSEGV handler: let perf-hooks recover from a crash inside a hook
 * (e.g. a BPF script), then dump a stack trace for diagnosis.
 */
static void sigsegv_handler(int sig)
{
	perf_hooks__recover();
	sighandler_dump_stack(sig);
}
368
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300369static void record__sig_exit(void)
370{
371 if (signr == -1)
372 return;
373
374 signal(signr, SIG_DFL);
375 raise(signr);
376}
377
Adrian Huntere31f0d02015-04-30 17:37:27 +0300378#ifdef HAVE_AUXTRACE_SUPPORT
379
/*
 * Write one AUX area trace event plus its (possibly wrapped) payload to
 * the output. For seekable output the event's file offset is recorded in
 * the session's auxtrace index first. data1/data2 are the two segments of
 * a ring-buffer read that may wrap; the payload is padded to 8 bytes.
 *
 * Returns 0 on success, negative on indexing/seek failure. Write errors
 * from record__write() are not propagated here -- matches the original.
 */
static int record__process_auxtrace(struct perf_tool *tool,
				    struct perf_mmap *map,
				    union perf_event *event, void *data1,
				    size_t len1, void *data2, size_t len2)
{
	struct record *rec = container_of(tool, struct record, tool);
	struct perf_data *data = &rec->data;
	size_t padding;
	u8 pad[8] = {0};

	if (!perf_data__is_pipe(data)) {
		off_t file_offset;
		int fd = perf_data__fd(data);
		int err;

		/* index the event by its position in the output file */
		file_offset = lseek(fd, 0, SEEK_CUR);
		if (file_offset == -1)
			return -1;
		err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
						     event, file_offset);
		if (err)
			return err;
	}

	/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
	padding = (len1 + len2) & 7;
	if (padding)
		padding = 8 - padding;

	record__write(rec, map, event, event->header.size);
	record__write(rec, map, data1, len1);
	if (len2)
		record__write(rec, map, data2, len2);
	record__write(rec, map, &pad, padding);

	return 0;
}
417
/*
 * Drain the AUX area buffer of @map into the output via
 * record__process_auxtrace(). A positive return from the read means
 * data was emitted, which is counted as a sample.
 *
 * Returns 0 on success (including "no data"), negative on error.
 */
static int record__auxtrace_mmap_read(struct record *rec,
				      struct perf_mmap *map)
{
	int ret;

	ret = auxtrace_mmap__read(map, rec->itr, &rec->tool,
				  record__process_auxtrace);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}
433
/*
 * Snapshot-mode variant of record__auxtrace_mmap_read(): read at most
 * auxtrace_snapshot_size bytes of AUX data from @map.
 *
 * Returns 0 on success, negative on error.
 */
static int record__auxtrace_mmap_read_snapshot(struct record *rec,
					       struct perf_mmap *map)
{
	int ret;

	ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool,
					   record__process_auxtrace,
					   rec->opts.auxtrace_snapshot_size);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}
450
451static int record__auxtrace_read_snapshot_all(struct record *rec)
452{
453 int i;
454 int rc = 0;
455
456 for (i = 0; i < rec->evlist->nr_mmaps; i++) {
Jiri Olsae035f4c2018-09-13 14:54:05 +0200457 struct perf_mmap *map = &rec->evlist->mmap[i];
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300458
Jiri Olsae035f4c2018-09-13 14:54:05 +0200459 if (!map->auxtrace_mmap.base)
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300460 continue;
461
Jiri Olsae035f4c2018-09-13 14:54:05 +0200462 if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300463 rc = -1;
464 goto out;
465 }
466 }
467out:
468 return rc;
469}
470
/*
 * Collect a requested AUX snapshot and advance the snapshot trigger:
 * on any failure the trigger goes to error state, otherwise it is
 * re-armed (ready) for the next snapshot request.
 */
static void record__read_auxtrace_snapshot(struct record *rec)
{
	pr_debug("Recording AUX area tracing snapshot\n");
	if (record__auxtrace_read_snapshot_all(rec) < 0) {
		trigger_error(&auxtrace_snapshot_trigger);
	} else {
		if (auxtrace_record__snapshot_finish(rec->itr))
			trigger_error(&auxtrace_snapshot_trigger);
		else
			trigger_ready(&auxtrace_snapshot_trigger);
	}
}
483
/*
 * Set up AUX area recording: create the itr context if not already
 * present, parse snapshot options and any auxtrace filters.
 *
 * Returns 0 on success or a negative/parse error code.
 */
static int record__auxtrace_init(struct record *rec)
{
	int err;

	if (!rec->itr) {
		/* may legitimately produce a NULL itr with err == 0 when no AUX PMU exists -- presumably; confirm in auxtrace_record__init() */
		rec->itr = auxtrace_record__init(rec->evlist, &err);
		if (err)
			return err;
	}

	err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
					      rec->opts.auxtrace_snapshot_opts);
	if (err)
		return err;

	return auxtrace_parse_filters(rec->evlist);
}
501
Adrian Huntere31f0d02015-04-30 17:37:27 +0300502#else
503
/* Stubs used when perf is built without AUX area tracing support:
 * all AUX operations succeed as no-ops. */
static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
			       struct perf_mmap *map __maybe_unused)
{
	return 0;
}

static inline
void record__read_auxtrace_snapshot(struct record *rec __maybe_unused)
{
}

static inline
int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
{
	return 0;
}

static int record__auxtrace_init(struct record *rec __maybe_unused)
{
	return 0;
}
526
Adrian Huntere31f0d02015-04-30 17:37:27 +0300527#endif
528
/*
 * mmap the event ring buffers (and AUX areas) for @evlist according to
 * the record options. Produces a user-actionable hint for EPERM (mlock
 * limit) failures.
 *
 * Returns 0 on success, -errno or -EINVAL on failure.
 */
static int record__mmap_evlist(struct record *rec,
			       struct perf_evlist *evlist)
{
	struct record_opts *opts = &rec->opts;
	char msg[512];

	if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
				 opts->auxtrace_mmap_pages,
				 opts->auxtrace_snapshot_mode, opts->nr_cblocks) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u,%u)\n",
			       opts->mmap_pages, opts->auxtrace_mmap_pages);
			return -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno,
				str_error_r(errno, msg, sizeof(msg)));
			/* errno may be 0 here; fall back to -EINVAL in that case */
			if (errno)
				return -errno;
			else
				return -EINVAL;
		}
	}
	return 0;
}
557
558static int record__mmap(struct record *rec)
559{
560 return record__mmap_evlist(rec, rec->evlist);
561}
562
/*
 * Open all counters for recording: configure the evlist, open each
 * event (with fallback and weak-group retry on failure), apply event
 * filters and driver configs, and mmap the ring buffers.
 *
 * Returns 0 on success, negative errno/-1 on failure.
 */
static int record__open(struct record *rec)
{
	char msg[BUFSIZ];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	struct perf_evsel_config_term *err_term;
	int rc = 0;

	/*
	 * For initial_delay we need to add a dummy event so that we can track
	 * PERF_RECORD_MMAP while we wait for the initial delay to enable the
	 * real events, the ones asked by the user.
	 */
	if (opts->initial_delay) {
		if (perf_evlist__add_dummy(evlist))
			return -ENOMEM;

		/* the dummy (first) event tracks; real events start on exec */
		pos = perf_evlist__first(evlist);
		pos->tracking = 0;
		pos = perf_evlist__last(evlist);
		pos->tracking = 1;
		pos->attr.enable_on_exec = 1;
	}

	perf_evlist__config(evlist, opts, &callchain_param);

	evlist__for_each_entry(evlist, pos) {
try_again:
		if (perf_evsel__open(pos, pos->cpus, pos->threads) < 0) {
			/* e.g. downgrade precise_ip and retry */
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose > 0)
					ui__warning("%s\n", msg);
				goto try_again;
			}
			/* weak group member failed: break the group up and retry */
			if ((errno == EINVAL || errno == EBADF) &&
			    pos->leader != pos &&
			    pos->weak_group) {
				pos = perf_evlist__reset_weak_group(evlist, pos);
				goto try_again;
			}
			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}

		pos->supported = true;
	}

	if (perf_evlist__apply_filters(evlist, &pos)) {
		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
			pos->filter, perf_evsel__name(pos), errno,
			str_error_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	if (perf_evlist__apply_drv_configs(evlist, &pos, &err_term)) {
		pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
		      err_term->val.drv_cfg, perf_evsel__name(pos), errno,
		      str_error_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	rc = record__mmap(rec);
	if (rc)
		goto out;

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}
640
/*
 * perf_tool sample callback used while post-processing the recorded
 * file for build-ids: track first/last sample timestamps and mark the
 * DSO each sample hit. With --buildid-all only the timestamps are kept.
 */
static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	struct record *rec = container_of(tool, struct record, tool);

	if (rec->evlist->first_sample_time == 0)
		rec->evlist->first_sample_time = sample->time;

	rec->evlist->last_sample_time = sample->time;

	/* --buildid-all: DSOs are marked wholesale, no per-sample work needed */
	if (rec->buildid_all)
		return 0;

	rec->samples++;
	return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
}
660
/*
 * Re-read the just-recorded data file to collect build-ids of the DSOs
 * that were hit. Returns 0 for an empty file, otherwise the result of
 * session processing.
 */
static int process_buildids(struct record *rec)
{
	struct perf_data *data = &rec->data;
	struct perf_session *session = rec->session;

	if (data->size == 0)
		return 0;

	/*
	 * During this process, it'll load kernel map and replace the
	 * dso->long_name to a real pathname it found. In this case
	 * we prefer the vmlinux path like
	 * /lib/modules/3.16.4/build/vmlinux
	 *
	 * rather than build-id path (in debug directory).
	 * $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
	 */
	symbol_conf.ignore_vmlinux_buildid = true;

	/*
	 * If --buildid-all is given, it marks all DSO regardless of hits,
	 * so no need to process samples. But if timestamp_boundary is enabled,
	 * it still needs to walk on all samples to get the timestamps of
	 * first/last samples.
	 */
	if (rec->buildid_all && !rec->timestamp_boundary)
		rec->tool.sample = NULL;

	return perf_session__process_events(session);
}
691
/*
 * machines__process_guests() callback: synthesize module and kernel mmap
 * events for one guest machine so guest symbols can be resolved later.
 * Failures are reported but not fatal.
 */
static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * As for guest kernel when processing subcommand record&report,
	 * we arrange module mmap prior to guest kernel mmap and trigger
	 * a preload dso because default guest module symbols are loaded
	 * from guest kallsyms instead of /lib/modules/XXX/XXX. This
	 * method is used to avoid symbol missing when the first addr is
	 * in module instead of in guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
	 * have no _text sometimes.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}
720
/* Header-only event emitted after each flush round so the report side
 * knows it may safely sort/process events queued up to this point. */
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};
725
/*
 * Flush all ring buffers of @evlist (normal or overwrite set, selected
 * by @overwrite) to the output file, via synchronous writes or the AIO
 * path, plus any AUX area data. Emits a PERF_RECORD_FINISHED_ROUND
 * marker if anything was written this round.
 *
 * Returns 0 on success, -1 on the first push/read failure.
 */
static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
				    bool overwrite)
{
	u64 bytes_written = rec->bytes_written;	/* snapshot to detect writes this round */
	int i;
	int rc = 0;
	struct perf_mmap *maps;
	int trace_fd = rec->data.file.fd;
	off_t off;

	if (!evlist)
		return 0;

	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
	if (!maps)
		return 0;

	/* overwrite buffers are only drained when data collection is pending */
	if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
		return 0;

	/* AIO writes are positioned: track the file offset manually */
	if (record__aio_enabled(rec))
		off = record__aio_get_pos(trace_fd);

	for (i = 0; i < evlist->nr_mmaps; i++) {
		struct perf_mmap *map = &maps[i];

		if (map->base) {
			if (!record__aio_enabled(rec)) {
				if (perf_mmap__push(map, rec, record__pushfn) != 0) {
					rc = -1;
					goto out;
				}
			} else {
				int idx;
				/*
				 * Call record__aio_sync() to wait till map->data buffer
				 * becomes available after previous aio write request.
				 */
				idx = record__aio_sync(map, false);
				if (perf_mmap__aio_push(map, rec, idx, record__aio_pushfn, &off) != 0) {
					/* rewind past any partially queued data */
					record__aio_set_pos(trace_fd, off);
					rc = -1;
					goto out;
				}
			}
		}

		if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
		    record__auxtrace_mmap_read(rec, map) != 0) {
			rc = -1;
			goto out;
		}
	}

	/* leave the fd at the end of all queued AIO data */
	if (record__aio_enabled(rec))
		record__aio_set_pos(trace_fd, off);

	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, NULL, &finished_round_event, sizeof(finished_round_event));

	if (overwrite)
		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
out:
	return rc;
}
795
Wang Nancb216862016-06-27 10:24:04 +0000796static int record__mmap_read_all(struct record *rec)
797{
798 int err;
799
Wang Nana4ea0ec2016-07-14 08:34:36 +0000800 err = record__mmap_read_evlist(rec, rec->evlist, false);
Wang Nancb216862016-06-27 10:24:04 +0000801 if (err)
802 return err;
803
Wang Nan057374642016-07-14 08:34:43 +0000804 return record__mmap_read_evlist(rec, rec->evlist, true);
Wang Nancb216862016-06-27 10:24:04 +0000805}
806
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -0300807static void record__init_features(struct record *rec)
David Ahern57706ab2013-11-06 11:41:34 -0700808{
David Ahern57706ab2013-11-06 11:41:34 -0700809 struct perf_session *session = rec->session;
810 int feat;
811
812 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
813 perf_header__set_feat(&session->header, feat);
814
815 if (rec->no_buildid)
816 perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
817
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -0300818 if (!have_tracepoints(&rec->evlist->entries))
David Ahern57706ab2013-11-06 11:41:34 -0700819 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
820
821 if (!rec->opts.branch_stack)
822 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
Adrian Hunteref149c22015-04-09 18:53:45 +0300823
824 if (!rec->opts.full_auxtrace)
825 perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
Jiri Olsaffa517a2015-10-25 15:51:43 +0100826
Alexey Budankovcf790512018-10-09 17:36:24 +0300827 if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
828 perf_header__clear_feat(&session->header, HEADER_CLOCKID);
829
Jiri Olsaffa517a2015-10-25 15:51:43 +0100830 perf_header__clear_feat(&session->header, HEADER_STAT);
David Ahern57706ab2013-11-06 11:41:34 -0700831}
832
Wang Nane1ab48b2016-02-26 09:32:10 +0000833static void
834record__finish_output(struct record *rec)
835{
Jiri Olsa8ceb41d2017-01-23 22:07:59 +0100836 struct perf_data *data = &rec->data;
837 int fd = perf_data__fd(data);
Wang Nane1ab48b2016-02-26 09:32:10 +0000838
Jiri Olsa8ceb41d2017-01-23 22:07:59 +0100839 if (data->is_pipe)
Wang Nane1ab48b2016-02-26 09:32:10 +0000840 return;
841
842 rec->session->header.data_size += rec->bytes_written;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +0100843 data->size = lseek(perf_data__fd(data), 0, SEEK_CUR);
Wang Nane1ab48b2016-02-26 09:32:10 +0000844
845 if (!rec->no_buildid) {
846 process_buildids(rec);
847
848 if (rec->buildid_all)
849 dsos__hit_all(rec->session);
850 }
851 perf_session__write_header(rec->session, rec->evlist, fd, true);
852
853 return;
854}
855
Wang Nan4ea648a2016-07-14 08:34:47 +0000856static int record__synthesize_workload(struct record *rec, bool tail)
Wang Nanbe7b0c92016-04-20 18:59:54 +0000857{
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -0300858 int err;
859 struct thread_map *thread_map;
Wang Nanbe7b0c92016-04-20 18:59:54 +0000860
Wang Nan4ea648a2016-07-14 08:34:47 +0000861 if (rec->opts.tail_synthesize != tail)
862 return 0;
863
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -0300864 thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
865 if (thread_map == NULL)
866 return -1;
867
868 err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
Wang Nanbe7b0c92016-04-20 18:59:54 +0000869 process_synthesized_event,
870 &rec->session->machines.host,
Mark Drayton3fcb10e2018-12-04 12:34:20 -0800871 rec->opts.sample_address);
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -0300872 thread_map__put(thread_map);
873 return err;
Wang Nanbe7b0c92016-04-20 18:59:54 +0000874}
875
Wang Nan4ea648a2016-07-14 08:34:47 +0000876static int record__synthesize(struct record *rec, bool tail);
Wang Nan3c1cb7e2016-04-20 18:59:50 +0000877
/*
 * Rotate the output file (--switch-output): flush pending AIO writes,
 * emit the tail synthesized events, finish the current perf.data, then
 * ask perf_data__switch() to move it aside under a timestamped name and
 * open a fresh file.  When not called from the exit path, the byte/size
 * counters are reset and the tracking events are re-synthesized into the
 * new file so it is self-contained.
 *
 * Returns the new output fd (>= 0) on success, negative on error.
 */
static int
record__switch_output(struct record *rec, bool at_exit)
{
	struct perf_data *data = &rec->data;
	int fd, err;

	/* Same Size: "2015122520103046"*/
	char timestamp[] = "InvalidTimestamp";

	/* make sure all in-flight AIO writes hit the old file first */
	record__aio_mmap_read_sync(rec);

	record__synthesize(rec, true);
	if (target__none(&rec->opts.target))
		record__synthesize_workload(rec, true);

	rec->samples = 0;
	record__finish_output(rec);
	err = fetch_current_timestamp(timestamp, sizeof(timestamp));
	if (err) {
		pr_err("Failed to get current timestamp\n");
		return -EINVAL;
	}

	fd = perf_data__switch(data, timestamp,
				    rec->session->header.data_offset,
				    at_exit);
	if (fd >= 0 && !at_exit) {
		/* fresh file: restart the size accounting */
		rec->bytes_written = 0;
		rec->session->header.data_size = 0;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
			data->file.path, timestamp);

	/* Output tracking events */
	if (!at_exit) {
		record__synthesize(rec, false);

		/*
		 * In 'perf record --switch-output' without -a,
		 * record__synthesize() in record__switch_output() won't
		 * generate tracking events because there's no thread_map
		 * in evlist. Which causes newly created perf.data doesn't
		 * contain map and comm information.
		 * Create a fake thread_map and directly call
		 * perf_event__synthesize_thread_map() for those events.
		 */
		if (target__none(&rec->opts.target))
			record__synthesize_workload(rec, false);
	}
	return fd;
}
931
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -0300932static volatile int workload_exec_errno;
933
934/*
935 * perf_evlist__prepare_workload will send a SIGUSR1
936 * if the fork fails, since we asked by setting its
937 * want_signal to true.
938 */
/*
 * SA_SIGINFO handler for the SIGUSR1 that perf_evlist__prepare_workload()
 * raises when starting the workload fails (see comment above): the errno
 * travels in the signal's sival_int payload.  Record it and flag the main
 * loop to stop; the store order (errno before the flags) matters for the
 * reader in __cmd_record().
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}
947
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300948static void snapshot_sig_handler(int sig);
Jiri Olsabfacbe32017-01-09 10:52:00 +0100949static void alarm_sig_handler(int sig);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300950
/*
 * Weak default for the time-conversion synthesis hook: emits nothing and
 * reports success.  Architectures that can derive a conversion from the
 * mmap control page override this __weak symbol — presumably via arch
 * code (TODO: confirm which arches provide one).
 */
int __weak
perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
			    struct perf_tool *tool __maybe_unused,
			    perf_event__handler_t process __maybe_unused,
			    struct machine *machine __maybe_unused)
{
	return 0;
}
959
Wang Nanee667f92016-06-27 10:24:05 +0000960static const struct perf_event_mmap_page *
961perf_evlist__pick_pc(struct perf_evlist *evlist)
962{
Wang Nanb2cb6152016-07-14 08:34:39 +0000963 if (evlist) {
964 if (evlist->mmap && evlist->mmap[0].base)
965 return evlist->mmap[0].base;
Wang Nan0b72d692017-12-04 16:51:07 +0000966 if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].base)
967 return evlist->overwrite_mmap[0].base;
Wang Nanb2cb6152016-07-14 08:34:39 +0000968 }
Wang Nanee667f92016-06-27 10:24:05 +0000969 return NULL;
970}
971
Wang Nanc45628b2016-05-24 02:28:59 +0000972static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
973{
Wang Nanee667f92016-06-27 10:24:05 +0000974 const struct perf_event_mmap_page *pc;
975
976 pc = perf_evlist__pick_pc(rec->evlist);
977 if (pc)
978 return pc;
Wang Nanc45628b2016-05-24 02:28:59 +0000979 return NULL;
980}
981
Wang Nan4ea648a2016-07-14 08:34:47 +0000982static int record__synthesize(struct record *rec, bool tail)
Wang Nanc45c86e2016-02-26 09:32:07 +0000983{
984 struct perf_session *session = rec->session;
985 struct machine *machine = &session->machines.host;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +0100986 struct perf_data *data = &rec->data;
Wang Nanc45c86e2016-02-26 09:32:07 +0000987 struct record_opts *opts = &rec->opts;
988 struct perf_tool *tool = &rec->tool;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +0100989 int fd = perf_data__fd(data);
Wang Nanc45c86e2016-02-26 09:32:07 +0000990 int err = 0;
991
Wang Nan4ea648a2016-07-14 08:34:47 +0000992 if (rec->opts.tail_synthesize != tail)
993 return 0;
994
Jiri Olsa8ceb41d2017-01-23 22:07:59 +0100995 if (data->is_pipe) {
Jiri Olsaa2015512018-03-14 10:22:04 +0100996 /*
997 * We need to synthesize events first, because some
998 * features works on top of them (on report side).
999 */
Jiri Olsa318ec182018-08-30 08:32:15 +02001000 err = perf_event__synthesize_attrs(tool, rec->evlist,
Wang Nanc45c86e2016-02-26 09:32:07 +00001001 process_synthesized_event);
1002 if (err < 0) {
1003 pr_err("Couldn't synthesize attrs.\n");
1004 goto out;
1005 }
1006
Jiri Olsaa2015512018-03-14 10:22:04 +01001007 err = perf_event__synthesize_features(tool, session, rec->evlist,
1008 process_synthesized_event);
1009 if (err < 0) {
1010 pr_err("Couldn't synthesize features.\n");
1011 return err;
1012 }
1013
Wang Nanc45c86e2016-02-26 09:32:07 +00001014 if (have_tracepoints(&rec->evlist->entries)) {
1015 /*
1016 * FIXME err <= 0 here actually means that
1017 * there were no tracepoints so its not really
1018 * an error, just that we don't need to
1019 * synthesize anything. We really have to
1020 * return this more properly and also
1021 * propagate errors that now are calling die()
1022 */
1023 err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
1024 process_synthesized_event);
1025 if (err <= 0) {
1026 pr_err("Couldn't record tracing data.\n");
1027 goto out;
1028 }
1029 rec->bytes_written += err;
1030 }
1031 }
1032
Wang Nanc45628b2016-05-24 02:28:59 +00001033 err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
Adrian Hunter46bc29b2016-03-08 10:38:44 +02001034 process_synthesized_event, machine);
1035 if (err)
1036 goto out;
1037
Wang Nanc45c86e2016-02-26 09:32:07 +00001038 if (rec->opts.full_auxtrace) {
1039 err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
1040 session, process_synthesized_event);
1041 if (err)
1042 goto out;
1043 }
1044
Arnaldo Carvalho de Melo6c443952017-11-14 11:03:19 -03001045 if (!perf_evlist__exclude_kernel(rec->evlist)) {
1046 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
1047 machine);
1048 WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
1049 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
1050 "Check /proc/kallsyms permission or run as root.\n");
Wang Nanc45c86e2016-02-26 09:32:07 +00001051
Arnaldo Carvalho de Melo6c443952017-11-14 11:03:19 -03001052 err = perf_event__synthesize_modules(tool, process_synthesized_event,
1053 machine);
1054 WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
1055 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
1056 "Check /proc/modules permission or run as root.\n");
1057 }
Wang Nanc45c86e2016-02-26 09:32:07 +00001058
1059 if (perf_guest) {
1060 machines__process_guests(&session->machines,
1061 perf_event__synthesize_guest_os, tool);
1062 }
1063
Andi Kleenbfd8f722017-11-17 13:42:58 -08001064 err = perf_event__synthesize_extra_attr(&rec->tool,
1065 rec->evlist,
1066 process_synthesized_event,
1067 data->is_pipe);
1068 if (err)
1069 goto out;
1070
Andi Kleen373565d2017-11-17 13:42:59 -08001071 err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->threads,
1072 process_synthesized_event,
1073 NULL);
1074 if (err < 0) {
1075 pr_err("Couldn't synthesize thread map.\n");
1076 return err;
1077 }
1078
1079 err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->cpus,
1080 process_synthesized_event, NULL);
1081 if (err < 0) {
1082 pr_err("Couldn't synthesize cpu map.\n");
1083 return err;
1084 }
1085
Song Liu7b612e22019-01-17 08:15:19 -08001086 err = perf_event__synthesize_bpf_events(tool, process_synthesized_event,
1087 machine, opts);
1088 if (err < 0)
1089 pr_warning("Couldn't synthesize bpf events.\n");
1090
Wang Nanc45c86e2016-02-26 09:32:07 +00001091 err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
1092 process_synthesized_event, opts->sample_address,
Mark Drayton3fcb10e2018-12-04 12:34:20 -08001093 1);
Wang Nanc45c86e2016-02-26 09:32:07 +00001094out:
1095 return err;
1096}
1097
/*
 * Main body of 'perf record': install signal handlers, create the output
 * session, optionally prepare the forked workload, open the events,
 * synthesize the initial metadata, then loop draining the mmap ring
 * buffers (handling auxtrace snapshots and --switch-output rotation)
 * until done or drained, and finally reap the child, synthesize the tail
 * events and finish/rotate the output file.
 *
 * Returns 0 or the workload's exit status on success, negative on error.
 */
static int __cmd_record(struct record *rec, int argc, const char **argv)
{
	int err;
	int status = 0;
	unsigned long waking = 0;
	const bool forks = argc > 0;	/* extra argv == workload to run */
	struct perf_tool *tool = &rec->tool;
	struct record_opts *opts = &rec->opts;
	struct perf_data *data = &rec->data;
	struct perf_session *session;
	bool disabled = false, draining = false;
	int fd;

	atexit(record__sig_exit);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGTERM, sig_handler);
	signal(SIGSEGV, sigsegv_handler);

	if (rec->opts.record_namespaces)
		tool->namespace_events = true;

	/* SIGUSR2 drives auxtrace snapshots and/or output switching. */
	if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
		signal(SIGUSR2, snapshot_sig_handler);
		if (rec->opts.auxtrace_snapshot_mode)
			trigger_on(&auxtrace_snapshot_trigger);
		if (rec->switch_output.enabled)
			trigger_on(&switch_output_trigger);
	} else {
		signal(SIGUSR2, SIG_IGN);
	}

	session = perf_session__new(data, false, tool);
	if (session == NULL) {
		pr_err("Perf session creation failed.\n");
		return -1;
	}

	fd = perf_data__fd(data);
	rec->session = session;

	record__init_features(rec);

	if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
		session->header.env.clockid_res_ns = rec->opts.clockid_res_ns;

	if (forks) {
		err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
						    argv, data->is_pipe,
						    workload_exec_failed_signal);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			status = err;
			goto out_delete_session;
		}
	}

	/*
	 * If we have just single event and are sending data
	 * through pipe, we need to force the ids allocation,
	 * because we synthesize event name through the pipe
	 * and need the id for that.
	 */
	if (data->is_pipe && rec->evlist->nr_entries == 1)
		rec->opts.sample_id = true;

	if (record__open(rec) != 0) {
		err = -1;
		goto out_child;
	}

	err = bpf__apply_obj_config();
	if (err) {
		char errbuf[BUFSIZ];

		bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
		pr_err("ERROR: Apply config to BPF failed: %s\n",
			 errbuf);
		goto out_child;
	}

	/*
	 * Normally perf_session__new would do this, but it doesn't have the
	 * evlist.
	 */
	if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
		pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
		rec->tool.ordered_events = false;
	}

	if (!rec->evlist->nr_groups)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

	if (data->is_pipe) {
		err = perf_header__write_pipe(fd);
		if (err < 0)
			goto out_child;
	} else {
		err = perf_session__write_header(session, rec->evlist, fd, false);
		if (err < 0)
			goto out_child;
	}

	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		err = -1;
		goto out_child;
	}

	err = record__synthesize(rec, false);
	if (err < 0)
		goto out_child;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_child;
		}
	}

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!target__none(&opts->target) && !opts->initial_delay)
		perf_evlist__enable(rec->evlist);

	/*
	 * Let the child rip
	 */
	if (forks) {
		struct machine *machine = &session->machines.host;
		union perf_event *event;
		pid_t tgid;

		event = malloc(sizeof(event->comm) + machine->id_hdr_size);
		if (event == NULL) {
			err = -ENOMEM;
			goto out_child;
		}

		/*
		 * Some H/W events are generated before COMM event
		 * which is emitted during exec(), so perf script
		 * cannot see a correct process name for those events.
		 * Synthesize COMM event to prevent it.
		 */
		tgid = perf_event__synthesize_comm(tool, event,
						   rec->evlist->workload.pid,
						   process_synthesized_event,
						   machine);
		free(event);

		/*
		 * NOTE(review): 'err' is not updated on this failure path, so
		 * the final status reflects the stale value from
		 * record__synthesize() above — confirm this is intended.
		 */
		if (tgid == -1)
			goto out_child;

		event = malloc(sizeof(event->namespaces) +
			       (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
			       machine->id_hdr_size);
		if (event == NULL) {
			err = -ENOMEM;
			goto out_child;
		}

		/*
		 * Synthesize NAMESPACES event for the command specified.
		 */
		perf_event__synthesize_namespaces(tool, event,
						  rec->evlist->workload.pid,
						  tgid, process_synthesized_event,
						  machine);
		free(event);

		perf_evlist__start_workload(rec->evlist);
	}

	if (opts->initial_delay) {
		usleep(opts->initial_delay * USEC_PER_MSEC);
		perf_evlist__enable(rec->evlist);
	}

	trigger_ready(&auxtrace_snapshot_trigger);
	trigger_ready(&switch_output_trigger);
	perf_hooks__invoke_record_start();
	for (;;) {
		unsigned long long hits = rec->samples;

		/*
		 * rec->evlist->bkw_mmap_state is possible to be
		 * BKW_MMAP_EMPTY here: when done == true and
		 * hits != rec->samples in previous round.
		 *
		 * perf_evlist__toggle_bkw_mmap ensure we never
		 * convert BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
		 */
		if (trigger_is_hit(&switch_output_trigger) || done || draining)
			perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);

		if (record__mmap_read_all(rec) < 0) {
			trigger_error(&auxtrace_snapshot_trigger);
			trigger_error(&switch_output_trigger);
			err = -1;
			goto out_child;
		}

		if (auxtrace_record__snapshot_started) {
			auxtrace_record__snapshot_started = 0;
			if (!trigger_is_error(&auxtrace_snapshot_trigger))
				record__read_auxtrace_snapshot(rec);
			if (trigger_is_error(&auxtrace_snapshot_trigger)) {
				pr_err("AUX area tracing snapshot failed\n");
				err = -1;
				goto out_child;
			}
		}

		if (trigger_is_hit(&switch_output_trigger)) {
			/*
			 * If switch_output_trigger is hit, the data in
			 * overwritable ring buffer should have been collected,
			 * so bkw_mmap_state should be set to BKW_MMAP_EMPTY.
			 *
			 * If SIGUSR2 raise after or during record__mmap_read_all(),
			 * record__mmap_read_all() didn't collect data from
			 * overwritable ring buffer. Read again.
			 */
			if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
				continue;
			trigger_ready(&switch_output_trigger);

			/*
			 * Reenable events in overwrite ring buffer after
			 * record__mmap_read_all(): we should have collected
			 * data from it.
			 */
			perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);

			if (!quiet)
				fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
					waking);
			waking = 0;
			fd = record__switch_output(rec, false);
			if (fd < 0) {
				pr_err("Failed to switch to new file\n");
				trigger_error(&switch_output_trigger);
				err = fd;
				goto out_child;
			}

			/* re-arm the alarm */
			if (rec->switch_output.time)
				alarm(rec->switch_output.time);
		}

		if (hits == rec->samples) {
			/* no new samples this round: block until activity */
			if (done || draining)
				break;
			err = perf_evlist__poll(rec->evlist, -1);
			/*
			 * Propagate error, only if there's any. Ignore positive
			 * number of returned events and interrupt error.
			 */
			if (err > 0 || (err < 0 && errno == EINTR))
				err = 0;
			waking++;

			if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
				draining = true;
		}

		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
		if (done && !disabled && !target__none(&opts->target)) {
			trigger_off(&auxtrace_snapshot_trigger);
			perf_evlist__disable(rec->evlist);
			disabled = true;
		}
	}
	trigger_off(&auxtrace_snapshot_trigger);
	trigger_off(&switch_output_trigger);

	if (forks && workload_exec_errno) {
		char msg[STRERR_BUFSIZE];
		const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
		pr_err("Workload failed: %s\n", emsg);
		err = -1;
		goto out_child;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

	if (target__none(&rec->opts.target))
		record__synthesize_workload(rec, true);

out_child:
	/* flush any in-flight AIO writes before touching the header */
	record__aio_mmap_read_sync(rec);

	if (forks) {
		int exit_status;

		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&exit_status);

		if (err < 0)
			status = err;
		else if (WIFEXITED(exit_status))
			status = WEXITSTATUS(exit_status);
		else if (WIFSIGNALED(exit_status))
			signr = WTERMSIG(exit_status);
	} else
		status = err;

	record__synthesize(rec, true);
	/* this will be recalculated during process_buildids() */
	rec->samples = 0;

	if (!err) {
		if (!rec->timestamp_filename) {
			record__finish_output(rec);
		} else {
			/* timestamped output: rotate one last time at exit */
			fd = record__switch_output(rec, true);
			if (fd < 0) {
				status = fd;
				goto out_delete_session;
			}
		}
	}

	perf_hooks__invoke_record_end();

	if (!err && !quiet) {
		char samples[128];
		const char *postfix = rec->timestamp_filename ?
					".<timestamp>" : "";

		if (rec->samples && !rec->opts.full_auxtrace)
			scnprintf(samples, sizeof(samples),
				  " (%" PRIu64 " samples)", rec->samples);
		else
			samples[0] = '\0';

		fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s ]\n",
			perf_data__size(data) / 1024.0 / 1024.0,
			data->file.path, postfix, samples);
	}

out_delete_session:
	perf_session__delete(session);
	return status;
}
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001461
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001462static void callchain_debug(struct callchain_param *callchain)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001463{
Kan Liangaad2b212015-01-05 13:23:04 -05001464 static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
Jiri Olsaa601fdf2014-02-03 12:44:43 +01001465
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001466 pr_debug("callchain: type %s\n", str[callchain->record_mode]);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001467
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001468 if (callchain->record_mode == CALLCHAIN_DWARF)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001469 pr_debug("callchain: stack dump size %d\n",
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001470 callchain->dump_size);
1471}
1472
1473int record_opts__parse_callchain(struct record_opts *record,
1474 struct callchain_param *callchain,
1475 const char *arg, bool unset)
1476{
1477 int ret;
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001478 callchain->enabled = !unset;
1479
1480 /* --no-call-graph */
1481 if (unset) {
1482 callchain->record_mode = CALLCHAIN_NONE;
1483 pr_debug("callchain: disabled\n");
1484 return 0;
1485 }
1486
1487 ret = parse_callchain_record_opt(arg, callchain);
1488 if (!ret) {
1489 /* Enable data address sampling for DWARF unwind. */
1490 if (callchain->record_mode == CALLCHAIN_DWARF)
1491 record->sample_address = true;
1492 callchain_debug(callchain);
1493 }
1494
1495 return ret;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001496}
1497
Kan Liangc421e802015-07-29 05:42:12 -04001498int record_parse_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001499 const char *arg,
1500 int unset)
1501{
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001502 return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
Jiri Olsa26d33022012-08-07 15:20:47 +02001503}
1504
Kan Liangc421e802015-07-29 05:42:12 -04001505int record_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001506 const char *arg __maybe_unused,
1507 int unset __maybe_unused)
1508{
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001509 struct callchain_param *callchain = opt->value;
Kan Liangc421e802015-07-29 05:42:12 -04001510
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001511 callchain->enabled = true;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001512
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001513 if (callchain->record_mode == CALLCHAIN_NONE)
1514 callchain->record_mode = CALLCHAIN_FP;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001515
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001516 callchain_debug(callchain);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001517 return 0;
1518}
1519
Jiri Olsaeb853e82014-02-03 12:44:42 +01001520static int perf_record_config(const char *var, const char *value, void *cb)
1521{
Namhyung Kim7a29c082015-12-15 10:49:56 +09001522 struct record *rec = cb;
1523
1524 if (!strcmp(var, "record.build-id")) {
1525 if (!strcmp(value, "cache"))
1526 rec->no_buildid_cache = false;
1527 else if (!strcmp(value, "no-cache"))
1528 rec->no_buildid_cache = true;
1529 else if (!strcmp(value, "skip"))
1530 rec->no_buildid = true;
1531 else
1532 return -1;
1533 return 0;
1534 }
Yisheng Xiecff17202018-03-12 19:25:57 +08001535 if (!strcmp(var, "record.call-graph")) {
1536 var = "call-graph.record-mode";
1537 return perf_default_config(var, value, cb);
1538 }
Alexey Budankov93f20c02018-11-06 12:07:19 +03001539#ifdef HAVE_AIO_SUPPORT
1540 if (!strcmp(var, "record.aio")) {
1541 rec->opts.nr_cblocks = strtol(value, NULL, 0);
1542 if (!rec->opts.nr_cblocks)
1543 rec->opts.nr_cblocks = nr_cblocks_default;
1544 }
1545#endif
Jiri Olsaeb853e82014-02-03 12:44:42 +01001546
Yisheng Xiecff17202018-03-12 19:25:57 +08001547 return 0;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001548}
1549
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001550struct clockid_map {
1551 const char *name;
1552 int clockid;
1553};
1554
1555#define CLOCKID_MAP(n, c) \
1556 { .name = n, .clockid = (c), }
1557
1558#define CLOCKID_END { .name = NULL, }
1559
1560
1561/*
1562 * Add the missing ones, we need to build on many distros...
1563 */
1564#ifndef CLOCK_MONOTONIC_RAW
1565#define CLOCK_MONOTONIC_RAW 4
1566#endif
1567#ifndef CLOCK_BOOTTIME
1568#define CLOCK_BOOTTIME 7
1569#endif
1570#ifndef CLOCK_TAI
1571#define CLOCK_TAI 11
1572#endif
1573
1574static const struct clockid_map clockids[] = {
1575 /* available for all events, NMI safe */
1576 CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
1577 CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),
1578
1579 /* available for some events */
1580 CLOCKID_MAP("realtime", CLOCK_REALTIME),
1581 CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
1582 CLOCKID_MAP("tai", CLOCK_TAI),
1583
1584 /* available for the lazy */
1585 CLOCKID_MAP("mono", CLOCK_MONOTONIC),
1586 CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
1587 CLOCKID_MAP("real", CLOCK_REALTIME),
1588 CLOCKID_MAP("boot", CLOCK_BOOTTIME),
1589
1590 CLOCKID_END,
1591};
1592
Alexey Budankovcf790512018-10-09 17:36:24 +03001593static int get_clockid_res(clockid_t clk_id, u64 *res_ns)
1594{
1595 struct timespec res;
1596
1597 *res_ns = 0;
1598 if (!clock_getres(clk_id, &res))
1599 *res_ns = res.tv_nsec + res.tv_sec * NSEC_PER_SEC;
1600 else
1601 pr_warning("WARNING: Failed to determine specified clock resolution.\n");
1602
1603 return 0;
1604}
1605
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001606static int parse_clockid(const struct option *opt, const char *str, int unset)
1607{
1608 struct record_opts *opts = (struct record_opts *)opt->value;
1609 const struct clockid_map *cm;
1610 const char *ostr = str;
1611
1612 if (unset) {
1613 opts->use_clockid = 0;
1614 return 0;
1615 }
1616
1617 /* no arg passed */
1618 if (!str)
1619 return 0;
1620
1621 /* no setting it twice */
1622 if (opts->use_clockid)
1623 return -1;
1624
1625 opts->use_clockid = true;
1626
1627 /* if its a number, we're done */
1628 if (sscanf(str, "%d", &opts->clockid) == 1)
Alexey Budankovcf790512018-10-09 17:36:24 +03001629 return get_clockid_res(opts->clockid, &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001630
1631 /* allow a "CLOCK_" prefix to the name */
1632 if (!strncasecmp(str, "CLOCK_", 6))
1633 str += 6;
1634
1635 for (cm = clockids; cm->name; cm++) {
1636 if (!strcasecmp(str, cm->name)) {
1637 opts->clockid = cm->clockid;
Alexey Budankovcf790512018-10-09 17:36:24 +03001638 return get_clockid_res(opts->clockid,
1639 &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001640 }
1641 }
1642
1643 opts->use_clockid = false;
1644 ui__warning("unknown clockid %s, check man page\n", ostr);
1645 return -1;
1646}
1647
Adrian Huntere9db1312015-04-09 18:53:46 +03001648static int record__parse_mmap_pages(const struct option *opt,
1649 const char *str,
1650 int unset __maybe_unused)
1651{
1652 struct record_opts *opts = opt->value;
1653 char *s, *p;
1654 unsigned int mmap_pages;
1655 int ret;
1656
1657 if (!str)
1658 return -EINVAL;
1659
1660 s = strdup(str);
1661 if (!s)
1662 return -ENOMEM;
1663
1664 p = strchr(s, ',');
1665 if (p)
1666 *p = '\0';
1667
1668 if (*s) {
1669 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
1670 if (ret)
1671 goto out_free;
1672 opts->mmap_pages = mmap_pages;
1673 }
1674
1675 if (!p) {
1676 ret = 0;
1677 goto out_free;
1678 }
1679
1680 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
1681 if (ret)
1682 goto out_free;
1683
1684 opts->auxtrace_mmap_pages = mmap_pages;
1685
1686out_free:
1687 free(s);
1688 return ret;
1689}
1690
Jiri Olsa0c582442017-01-09 10:51:59 +01001691static void switch_output_size_warn(struct record *rec)
1692{
1693 u64 wakeup_size = perf_evlist__mmap_size(rec->opts.mmap_pages);
1694 struct switch_output *s = &rec->switch_output;
1695
1696 wakeup_size /= 2;
1697
1698 if (s->size < wakeup_size) {
1699 char buf[100];
1700
1701 unit_number__scnprintf(buf, sizeof(buf), wakeup_size);
1702 pr_warning("WARNING: switch-output data size lower than "
1703 "wakeup kernel buffer size (%s) "
1704 "expect bigger perf.data sizes\n", buf);
1705 }
1706}
1707
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001708static int switch_output_setup(struct record *rec)
1709{
1710 struct switch_output *s = &rec->switch_output;
Jiri Olsadc0c6122017-01-09 10:51:58 +01001711 static struct parse_tag tags_size[] = {
1712 { .tag = 'B', .mult = 1 },
1713 { .tag = 'K', .mult = 1 << 10 },
1714 { .tag = 'M', .mult = 1 << 20 },
1715 { .tag = 'G', .mult = 1 << 30 },
1716 { .tag = 0 },
1717 };
Jiri Olsabfacbe32017-01-09 10:52:00 +01001718 static struct parse_tag tags_time[] = {
1719 { .tag = 's', .mult = 1 },
1720 { .tag = 'm', .mult = 60 },
1721 { .tag = 'h', .mult = 60*60 },
1722 { .tag = 'd', .mult = 60*60*24 },
1723 { .tag = 0 },
1724 };
Jiri Olsadc0c6122017-01-09 10:51:58 +01001725 unsigned long val;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001726
1727 if (!s->set)
1728 return 0;
1729
1730 if (!strcmp(s->str, "signal")) {
1731 s->signal = true;
1732 pr_debug("switch-output with SIGUSR2 signal\n");
Jiri Olsadc0c6122017-01-09 10:51:58 +01001733 goto enabled;
1734 }
1735
1736 val = parse_tag_value(s->str, tags_size);
1737 if (val != (unsigned long) -1) {
1738 s->size = val;
1739 pr_debug("switch-output with %s size threshold\n", s->str);
1740 goto enabled;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001741 }
1742
Jiri Olsabfacbe32017-01-09 10:52:00 +01001743 val = parse_tag_value(s->str, tags_time);
1744 if (val != (unsigned long) -1) {
1745 s->time = val;
1746 pr_debug("switch-output with %s time threshold (%lu seconds)\n",
1747 s->str, s->time);
1748 goto enabled;
1749 }
1750
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001751 return -1;
Jiri Olsadc0c6122017-01-09 10:51:58 +01001752
1753enabled:
1754 rec->timestamp_filename = true;
1755 s->enabled = true;
Jiri Olsa0c582442017-01-09 10:51:59 +01001756
1757 if (s->size && !rec->opts.no_buffering)
1758 switch_output_size_warn(rec);
1759
Jiri Olsadc0c6122017-01-09 10:51:58 +01001760 return 0;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001761}
1762
Namhyung Kime5b2c202014-10-23 00:15:46 +09001763static const char * const __record_usage[] = {
Mike Galbraith9e0967532009-05-28 16:25:34 +02001764 "perf record [<options>] [<command>]",
1765 "perf record [<options>] -- <command> [<options>]",
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001766 NULL
1767};
Namhyung Kime5b2c202014-10-23 00:15:46 +09001768const char * const *record_usage = __record_usage;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001769
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001770/*
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001771 * XXX Ideally would be local to cmd_record() and passed to a record__new
1772 * because we need to have access to it in record__exit, that is called
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001773 * after cmd_record() exits, but since record_options need to be accessible to
1774 * builtin-script, leave it here.
1775 *
1776 * At least we don't ouch it in all the other functions here directly.
1777 *
1778 * Just say no to tons of global variables, sigh.
1779 */
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001780static struct record record = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001781 .opts = {
Andi Kleen8affc2b2014-07-31 14:45:04 +08001782 .sample_time = true,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001783 .mmap_pages = UINT_MAX,
1784 .user_freq = UINT_MAX,
1785 .user_interval = ULLONG_MAX,
Arnaldo Carvalho de Melo447a6012012-05-22 13:14:18 -03001786 .freq = 4000,
Namhyung Kimd1cb9fc2012-05-16 18:45:49 +09001787 .target = {
1788 .uses_mmap = true,
Adrian Hunter3aa59392013-11-15 15:52:29 +02001789 .default_per_cpu = true,
Namhyung Kimd1cb9fc2012-05-16 18:45:49 +09001790 },
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001791 },
Namhyung Kime3d59112015-01-29 17:06:44 +09001792 .tool = {
1793 .sample = process_sample_event,
1794 .fork = perf_event__process_fork,
Adrian Huntercca84822015-08-19 17:29:21 +03001795 .exit = perf_event__process_exit,
Namhyung Kime3d59112015-01-29 17:06:44 +09001796 .comm = perf_event__process_comm,
Hari Bathinif3b36142017-03-08 02:11:43 +05301797 .namespaces = perf_event__process_namespaces,
Namhyung Kime3d59112015-01-29 17:06:44 +09001798 .mmap = perf_event__process_mmap,
1799 .mmap2 = perf_event__process_mmap2,
Adrian Huntercca84822015-08-19 17:29:21 +03001800 .ordered_events = true,
Namhyung Kime3d59112015-01-29 17:06:44 +09001801 },
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001802};
Frederic Weisbecker7865e812010-04-14 19:42:07 +02001803
Namhyung Kim76a26542015-10-22 23:28:32 +09001804const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
1805 "\n\t\t\t\tDefault: fp";
Arnaldo Carvalho de Melo61eaa3b2012-10-01 15:20:58 -03001806
Wang Nan0aab2132016-06-16 08:02:41 +00001807static bool dry_run;
1808
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001809/*
1810 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
1811 * with it and switch to use the library functions in perf_evlist that came
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03001812 * from builtin-record.c, i.e. use record_opts,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001813 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
1814 * using pipes, etc.
1815 */
Jiri Olsaefd21302017-01-03 09:19:55 +01001816static struct option __record_options[] = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001817 OPT_CALLBACK('e', "event", &record.evlist, "event",
Thomas Gleixner86847b62009-06-06 12:24:17 +02001818 "event selector. use 'perf list' to list available events",
Jiri Olsaf120f9d2011-07-14 11:25:32 +02001819 parse_events_option),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001820 OPT_CALLBACK(0, "filter", &record.evlist, "filter",
Li Zefanc171b552009-10-15 11:22:07 +08001821 "event filter", parse_filter),
Wang Nan4ba1faa2015-07-10 07:36:10 +00001822 OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
1823 NULL, "don't record events from perf itself",
1824 exclude_perf),
Namhyung Kimbea03402012-04-26 14:15:15 +09001825 OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03001826 "record events on existing process id"),
Namhyung Kimbea03402012-04-26 14:15:15 +09001827 OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03001828 "record events on existing thread id"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001829 OPT_INTEGER('r', "realtime", &record.realtime_prio,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001830 "collect data with this RT SCHED_FIFO priority"),
Arnaldo Carvalho de Melo509051e2014-01-14 17:52:14 -03001831 OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
Kirill Smelkovacac03f2011-01-12 17:59:36 +03001832 "collect data without buffering"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001833 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
Frederic Weisbeckerdaac07b2009-08-13 10:27:19 +02001834 "collect raw sample records from all opened counters"),
Namhyung Kimbea03402012-04-26 14:15:15 +09001835 OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001836 "system-wide collection from all CPUs"),
Namhyung Kimbea03402012-04-26 14:15:15 +09001837 OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
Stephane Eranianc45c6ea2010-05-28 12:00:01 +02001838 "list of cpus to monitor"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001839 OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
Jiri Olsaeae8ad82017-01-23 22:25:41 +01001840 OPT_STRING('o', "output", &record.data.file.path, "file",
Ingo Molnarabaff322009-06-02 22:59:57 +02001841 "output file name"),
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02001842 OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
1843 &record.opts.no_inherit_set,
1844 "child tasks do not inherit counters"),
Wang Nan4ea648a2016-07-14 08:34:47 +00001845 OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
1846 "synthesize non-sample events at the end of output"),
Wang Nan626a6b72016-07-14 08:34:45 +00001847 OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
Song Liu45178a92019-01-17 08:15:18 -08001848 OPT_BOOLEAN(0, "bpf-event", &record.opts.bpf_event, "record bpf events"),
Arnaldo Carvalho de Melob09c2362018-03-01 14:52:50 -03001849 OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
1850 "Fail if the specified frequency can't be used"),
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03001851 OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
1852 "profile at this frequency",
1853 record__parse_freq),
Adrian Huntere9db1312015-04-09 18:53:46 +03001854 OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
1855 "number of mmap data pages and AUX area tracing mmap pages",
1856 record__parse_mmap_pages),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001857 OPT_BOOLEAN(0, "group", &record.opts.group,
Lin Ming43bece72011-08-17 18:42:07 +08001858 "put the counters into a counter group"),
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001859 OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001860 NULL, "enables call-graph recording" ,
1861 &record_callchain_opt),
1862 OPT_CALLBACK(0, "call-graph", &record.opts,
Namhyung Kim76a26542015-10-22 23:28:32 +09001863 "record_mode[,record_size]", record_callchain_help,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001864 &record_parse_callchain_opt),
Ian Munsiec0555642010-04-13 18:37:33 +10001865 OPT_INCR('v', "verbose", &verbose,
Ingo Molnar3da297a2009-06-07 17:39:02 +02001866 "be more verbose (show counter open errors, etc)"),
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02001867 OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001868 OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001869 "per thread counts"),
Peter Zijlstra56100322015-06-10 16:48:50 +02001870 OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
Kan Liang3b0a5da2017-08-29 13:11:08 -04001871 OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
1872 "Record the sample physical addresses"),
Jiri Olsab6f35ed2016-08-01 20:02:35 +02001873 OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
Adrian Hunter3abebc52015-07-06 14:51:01 +03001874 OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
1875 &record.opts.sample_time_set,
1876 "Record the sample timestamps"),
Jiri Olsaf290aa12018-02-01 09:38:11 +01001877 OPT_BOOLEAN_SET('P', "period", &record.opts.period, &record.opts.period_set,
1878 "Record the sample period"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001879 OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001880 "don't sample"),
Wang Nand2db9a92016-01-25 09:56:19 +00001881 OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
1882 &record.no_buildid_cache_set,
1883 "do not update the buildid cache"),
1884 OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
1885 &record.no_buildid_set,
1886 "do not collect buildids in perf.data"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001887 OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
Stephane Eranian023695d2011-02-14 11:20:01 +02001888 "monitor event in cgroup name only",
1889 parse_cgroups),
Arnaldo Carvalho de Meloa6205a32014-01-14 17:58:12 -03001890 OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
Andi Kleen6619a532014-01-11 13:38:27 -08001891 "ms to wait before starting measurement after program start"),
Namhyung Kimbea03402012-04-26 14:15:15 +09001892 OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
1893 "user to profile"),
Stephane Eraniana5aabda2012-03-08 23:47:45 +01001894
1895 OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
1896 "branch any", "sample any taken branches",
1897 parse_branch_stack),
1898
1899 OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
1900 "branch filter mask", "branch stack filter modes",
Roberto Agostino Vitillobdfebd82012-02-09 23:21:02 +01001901 parse_branch_stack),
Andi Kleen05484292013-01-24 16:10:29 +01001902 OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
1903 "sample by weight (on special events only)"),
Andi Kleen475eeab2013-09-20 07:40:43 -07001904 OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
1905 "sample transaction flags (special events only)"),
Adrian Hunter3aa59392013-11-15 15:52:29 +02001906 OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
1907 "use per-thread mmaps"),
Stephane Eranianbcc84ec2015-08-31 18:41:12 +02001908 OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
1909 "sample selected machine registers on interrupt,"
1910 " use -I ? to list register names", parse_regs),
Andi Kleen84c41742017-09-05 10:00:28 -07001911 OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
1912 "sample selected machine registers on interrupt,"
1913 " use -I ? to list register names", parse_regs),
Andi Kleen85c273d2015-02-24 15:13:40 -08001914 OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
1915 "Record running/enabled time of read (:S) events"),
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001916 OPT_CALLBACK('k', "clockid", &record.opts,
1917 "clockid", "clockid to use for events, see clock_gettime()",
1918 parse_clockid),
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001919 OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
1920 "opts", "AUX area tracing Snapshot Mode", ""),
Mark Drayton3fcb10e2018-12-04 12:34:20 -08001921 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
Kan Liang9d9cad72015-06-17 09:51:11 -04001922 "per thread proc mmap processing timeout in ms"),
Hari Bathinif3b36142017-03-08 02:11:43 +05301923 OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
1924 "Record namespaces events"),
Adrian Hunterb757bb02015-07-21 12:44:04 +03001925 OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
1926 "Record context switch events"),
Jiri Olsa85723882016-02-15 09:34:31 +01001927 OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
1928 "Configure all used events to run in kernel space.",
1929 PARSE_OPT_EXCLUSIVE),
1930 OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
1931 "Configure all used events to run in user space.",
1932 PARSE_OPT_EXCLUSIVE),
Wang Nan71dc23262015-10-14 12:41:19 +00001933 OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
1934 "clang binary to use for compiling BPF scriptlets"),
1935 OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
1936 "options passed to clang when compiling BPF scriptlets"),
He Kuang7efe0e02015-12-14 10:39:23 +00001937 OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
1938 "file", "vmlinux pathname"),
Namhyung Kim61566812016-01-11 22:37:09 +09001939 OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
1940 "Record build-id of all DSOs regardless of hits"),
Wang Nanecfd7a92016-04-13 08:21:07 +00001941 OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
1942 "append timestamp to output filename"),
Jin Yao68588ba2017-12-08 21:13:42 +08001943 OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
1944 "Record timestamp boundary (time of first/last samples)"),
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001945 OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
Jiri Olsabfacbe32017-01-09 10:52:00 +01001946 &record.switch_output.set, "signal,size,time",
1947 "Switch output when receive SIGUSR2 or cross size,time threshold",
Jiri Olsadc0c6122017-01-09 10:51:58 +01001948 "signal"),
Wang Nan0aab2132016-06-16 08:02:41 +00001949 OPT_BOOLEAN(0, "dry-run", &dry_run,
1950 "Parse options then exit"),
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001951#ifdef HAVE_AIO_SUPPORT
Alexey Budankov93f20c02018-11-06 12:07:19 +03001952 OPT_CALLBACK_OPTARG(0, "aio", &record.opts,
1953 &nr_cblocks_default, "n", "Use <n> control blocks in asynchronous trace writing mode (default: 1, max: 4)",
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001954 record__aio_parse),
1955#endif
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001956 OPT_END()
1957};
1958
Namhyung Kime5b2c202014-10-23 00:15:46 +09001959struct option *record_options = __record_options;
1960
Arnaldo Carvalho de Melob0ad8ea2017-03-27 11:47:20 -03001961int cmd_record(int argc, const char **argv)
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001962{
Adrian Hunteref149c22015-04-09 18:53:45 +03001963 int err;
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001964 struct record *rec = &record;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001965 char errbuf[BUFSIZ];
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001966
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03001967 setlocale(LC_ALL, "");
1968
Wang Nan48e1cab2015-12-14 10:39:22 +00001969#ifndef HAVE_LIBBPF_SUPPORT
1970# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
1971 set_nobuild('\0', "clang-path", true);
1972 set_nobuild('\0', "clang-opt", true);
1973# undef set_nobuild
1974#endif
1975
He Kuang7efe0e02015-12-14 10:39:23 +00001976#ifndef HAVE_BPF_PROLOGUE
1977# if !defined (HAVE_DWARF_SUPPORT)
1978# define REASON "NO_DWARF=1"
1979# elif !defined (HAVE_LIBBPF_SUPPORT)
1980# define REASON "NO_LIBBPF=1"
1981# else
1982# define REASON "this architecture doesn't support BPF prologue"
1983# endif
1984# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
1985 set_nobuild('\0', "vmlinux", true);
1986# undef set_nobuild
1987# undef REASON
1988#endif
1989
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001990 rec->evlist = perf_evlist__new();
1991 if (rec->evlist == NULL)
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -02001992 return -ENOMEM;
1993
Arnaldo Carvalho de Meloecc4c562017-01-24 13:44:10 -03001994 err = perf_config(perf_record_config, rec);
1995 if (err)
1996 return err;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001997
Tom Zanussibca647a2010-11-10 08:11:30 -06001998 argc = parse_options(argc, argv, record_options, record_usage,
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02001999 PARSE_OPT_STOP_AT_NON_OPTION);
Namhyung Kim68ba3232017-02-17 17:17:42 +09002000 if (quiet)
2001 perf_quiet_option();
Jiri Olsa483635a2017-02-17 18:00:18 +01002002
2003 /* Make system wide (-a) the default target. */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002004 if (!argc && target__none(&rec->opts.target))
Jiri Olsa483635a2017-02-17 18:00:18 +01002005 rec->opts.target.system_wide = true;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002006
Namhyung Kimbea03402012-04-26 14:15:15 +09002007 if (nr_cgroups && !rec->opts.target.system_wide) {
Namhyung Kimc7118362015-10-25 00:49:27 +09002008 usage_with_options_msg(record_usage, record_options,
2009 "cgroup monitoring only available in system-wide mode");
2010
Stephane Eranian023695d2011-02-14 11:20:01 +02002011 }
Adrian Hunterb757bb02015-07-21 12:44:04 +03002012 if (rec->opts.record_switch_events &&
2013 !perf_can_record_switch_events()) {
Namhyung Kimc7118362015-10-25 00:49:27 +09002014 ui__error("kernel does not support recording context switch events\n");
2015 parse_options_usage(record_usage, record_options, "switch-events", 0);
2016 return -EINVAL;
Adrian Hunterb757bb02015-07-21 12:44:04 +03002017 }
Stephane Eranian023695d2011-02-14 11:20:01 +02002018
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002019 if (switch_output_setup(rec)) {
2020 parse_options_usage(record_usage, record_options, "switch-output", 0);
2021 return -EINVAL;
2022 }
2023
Jiri Olsabfacbe32017-01-09 10:52:00 +01002024 if (rec->switch_output.time) {
2025 signal(SIGALRM, alarm_sig_handler);
2026 alarm(rec->switch_output.time);
2027 }
2028
Adrian Hunter1b36c032016-09-23 17:38:39 +03002029 /*
2030 * Allow aliases to facilitate the lookup of symbols for address
2031 * filters. Refer to auxtrace_parse_filters().
2032 */
2033 symbol_conf.allow_aliases = true;
2034
2035 symbol__init(NULL);
2036
Adrian Hunter4b5ea3b2018-03-06 11:13:12 +02002037 err = record__auxtrace_init(rec);
Adrian Hunter1b36c032016-09-23 17:38:39 +03002038 if (err)
2039 goto out;
2040
Wang Nan0aab2132016-06-16 08:02:41 +00002041 if (dry_run)
Adrian Hunter5c01ad602016-09-23 17:38:37 +03002042 goto out;
Wang Nan0aab2132016-06-16 08:02:41 +00002043
Wang Nand7888572016-04-08 15:07:24 +00002044 err = bpf__setup_stdout(rec->evlist);
2045 if (err) {
2046 bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
2047 pr_err("ERROR: Setup BPF stdout failed: %s\n",
2048 errbuf);
Adrian Hunter5c01ad602016-09-23 17:38:37 +03002049 goto out;
Wang Nand7888572016-04-08 15:07:24 +00002050 }
2051
Adrian Hunteref149c22015-04-09 18:53:45 +03002052 err = -ENOMEM;
2053
Arnaldo Carvalho de Melo6c443952017-11-14 11:03:19 -03002054 if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(rec->evlist))
Arnaldo Carvalho de Melo646aaea2011-05-27 11:00:41 -03002055 pr_warning(
2056"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
2057"check /proc/sys/kernel/kptr_restrict.\n\n"
2058"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
2059"file is not found in the buildid cache or in the vmlinux path.\n\n"
2060"Samples in kernel modules won't be resolved at all.\n\n"
2061"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
2062"even with a suitable vmlinux or kallsyms file.\n\n");
Arnaldo Carvalho de Meloec80fde2011-05-26 09:53:51 -03002063
Wang Nan0c1d46a2016-04-20 18:59:52 +00002064 if (rec->no_buildid_cache || rec->no_buildid) {
Stephane Eraniana1ac1d32010-06-17 11:39:01 +02002065 disable_buildid_cache();
Jiri Olsadc0c6122017-01-09 10:51:58 +01002066 } else if (rec->switch_output.enabled) {
Wang Nan0c1d46a2016-04-20 18:59:52 +00002067 /*
2068 * In 'perf record --switch-output', disable buildid
2069 * generation by default to reduce data file switching
2070 * overhead. Still generate buildid if they are required
2071 * explicitly using
2072 *
Jiri Olsa60437ac2017-01-03 09:19:56 +01002073 * perf record --switch-output --no-no-buildid \
Wang Nan0c1d46a2016-04-20 18:59:52 +00002074 * --no-no-buildid-cache
2075 *
2076 * Following code equals to:
2077 *
2078 * if ((rec->no_buildid || !rec->no_buildid_set) &&
2079 * (rec->no_buildid_cache || !rec->no_buildid_cache_set))
2080 * disable_buildid_cache();
2081 */
2082 bool disable = true;
2083
2084 if (rec->no_buildid_set && !rec->no_buildid)
2085 disable = false;
2086 if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
2087 disable = false;
2088 if (disable) {
2089 rec->no_buildid = true;
2090 rec->no_buildid_cache = true;
2091 disable_buildid_cache();
2092 }
2093 }
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02002094
Wang Nan4ea648a2016-07-14 08:34:47 +00002095 if (record.opts.overwrite)
2096 record.opts.tail_synthesize = true;
2097
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03002098 if (rec->evlist->nr_entries == 0 &&
Arnaldo Carvalho de Melo4b4cd502017-07-03 13:26:32 -03002099 __perf_evlist__add_default(rec->evlist, !record.opts.no_samples) < 0) {
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02002100 pr_err("Not enough memory for event selector list\n");
Adrian Hunter394c01e2016-09-23 17:38:36 +03002101 goto out;
Peter Zijlstrabbd36e52009-06-11 23:11:50 +02002102 }
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002103
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02002104 if (rec->opts.target.tid && !rec->opts.no_inherit_set)
2105 rec->opts.no_inherit = true;
2106
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002107 err = target__validate(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002108 if (err) {
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002109 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Jiri Olsac3dec272018-02-06 19:17:58 +01002110 ui__warning("%s\n", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002111 }
Namhyung Kim4bd0f2d2012-04-26 14:15:18 +09002112
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002113 err = target__parse_uid(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002114 if (err) {
2115 int saved_errno = errno;
2116
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002117 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Namhyung Kim3780f482012-05-29 13:22:57 +09002118 ui__error("%s", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002119
2120 err = -saved_errno;
Adrian Hunter394c01e2016-09-23 17:38:36 +03002121 goto out;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002122 }
Arnaldo Carvalho de Melo0d37aa32012-01-19 14:08:15 -02002123
Mengting Zhangca800062017-12-13 15:01:53 +08002124 /* Enable ignoring missing threads when -u/-p option is defined. */
2125 rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;
Jiri Olsa23dc4f12016-12-12 11:35:43 +01002126
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002127 err = -ENOMEM;
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03002128 if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
Arnaldo Carvalho de Melodd7927f2011-01-12 14:28:51 -02002129 usage_with_options(record_usage, record_options);
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02002130
Adrian Hunteref149c22015-04-09 18:53:45 +03002131 err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
2132 if (err)
Adrian Hunter394c01e2016-09-23 17:38:36 +03002133 goto out;
Adrian Hunteref149c22015-04-09 18:53:45 +03002134
Namhyung Kim61566812016-01-11 22:37:09 +09002135 /*
2136 * We take all buildids when the file contains
2137 * AUX area tracing data because we do not decode the
2138 * trace because it would take too long.
2139 */
2140 if (rec->opts.full_auxtrace)
2141 rec->buildid_all = true;
2142
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03002143 if (record_opts__config(&rec->opts)) {
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03002144 err = -EINVAL;
Adrian Hunter394c01e2016-09-23 17:38:36 +03002145 goto out;
Mike Galbraith7e4ff9e2009-10-12 07:56:03 +02002146 }
2147
Alexey Budankov93f20c02018-11-06 12:07:19 +03002148 if (rec->opts.nr_cblocks > nr_cblocks_max)
2149 rec->opts.nr_cblocks = nr_cblocks_max;
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002150 if (verbose > 0)
2151 pr_info("nr_cblocks: %d\n", rec->opts.nr_cblocks);
2152
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002153 err = __cmd_record(&record, argc, argv);
Adrian Hunter394c01e2016-09-23 17:38:36 +03002154out:
Namhyung Kim45604712014-05-12 09:47:24 +09002155 perf_evlist__delete(rec->evlist);
Arnaldo Carvalho de Melod65a4582010-07-30 18:31:28 -03002156 symbol__exit();
Adrian Hunteref149c22015-04-09 18:53:45 +03002157 auxtrace_record__free(rec->itr);
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03002158 return err;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002159}
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002160
2161static void snapshot_sig_handler(int sig __maybe_unused)
2162{
Jiri Olsadc0c6122017-01-09 10:51:58 +01002163 struct record *rec = &record;
2164
Wang Nan5f9cf592016-04-20 18:59:49 +00002165 if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
2166 trigger_hit(&auxtrace_snapshot_trigger);
2167 auxtrace_record__snapshot_started = 1;
2168 if (auxtrace_record__snapshot_start(record.itr))
2169 trigger_error(&auxtrace_snapshot_trigger);
2170 }
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002171
Jiri Olsadc0c6122017-01-09 10:51:58 +01002172 if (switch_output_signal(rec))
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002173 trigger_hit(&switch_output_trigger);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002174}
Jiri Olsabfacbe32017-01-09 10:52:00 +01002175
2176static void alarm_sig_handler(int sig __maybe_unused)
2177{
2178 struct record *rec = &record;
2179
2180 if (switch_output_time(rec))
2181 trigger_hit(&switch_output_trigger);
2182}