/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "perf.h"

#include "util/build-id.h"
#include "util/util.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/config.h"

#include "util/callchain.h"
#include "util/cgroup.h"
#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/drv_configs.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"
#include "util/perf_regs.h"
#include "util/auxtrace.h"
#include "util/tsc.h"
#include "util/parse-branch-options.h"
#include "util/parse-regs-options.h"
#include "util/llvm-utils.h"
#include "util/bpf-loader.h"
#include "util/trigger.h"
#include "util/perf-hooks.h"
#include "asm/bug.h"

#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>
#include <asm/bug.h>
#include <linux/time64.h>

struct switch_output {
	bool		 signal;
	const char	*str;
	bool		 set;
};

struct record {
	struct perf_tool	tool;
	struct record_opts	opts;
	u64			bytes_written;
	struct perf_data_file	file;
	struct auxtrace_record	*itr;
	struct perf_evlist	*evlist;
	struct perf_session	*session;
	const char		*progname;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_set;
	bool			no_buildid_cache;
	bool			no_buildid_cache_set;
	bool			buildid_all;
	bool			timestamp_filename;
	struct switch_output	switch_output;
	unsigned long long	samples;
};

static int record__write(struct record *rec, void *bf, size_t size)
{
	if (perf_data_file__write(rec->session->file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	rec->bytes_written += size;
	return 0;
}

static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, event, event->header.size);
}

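/*
 * Figure out how much of a backward (overwritable) ring buffer still holds
 * intact events: starting at 'head', walk the event headers until the buffer
 * wraps or a zero-sized header is found, and return the range to dump in
 * [*start, *end).
 */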
static int
backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
{
	struct perf_event_header *pheader;
	u64 evt_head = head;
	int size = mask + 1;

	pr_debug2("backward_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);
	pheader = (struct perf_event_header *)(buf + (head & mask));
	*start = head;
	while (true) {
		if (evt_head - head >= (unsigned int)size) {
			pr_debug("Finished reading backward ring buffer: rewind\n");
			if (evt_head - head > (unsigned int)size)
				evt_head -= pheader->size;
			*end = evt_head;
			return 0;
		}

		pheader = (struct perf_event_header *)(buf + (evt_head & mask));

		if (pheader->size == 0) {
			pr_debug("Finished reading backward ring buffer: get start\n");
			*end = evt_head;
			return 0;
		}

		evt_head += pheader->size;
		pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
	}
	WARN_ONCE(1, "Shouldn't get here\n");
	return -1;
}

static int
rb_find_range(void *data, int mask, u64 head, u64 old,
	      u64 *start, u64 *end, bool backward)
{
	if (!backward) {
		*start = old;
		*end = head;
		return 0;
	}

	return backward_rb_find_range(data, mask, head, start, end);
}

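/*
 * Read whatever is available in one mmap'ed ring buffer, splitting the copy
 * in two when the data wraps around the end of the buffer, and append it to
 * the output file via record__write().
 */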
static int
record__mmap_read(struct record *rec, struct perf_mmap *md,
		  bool overwrite, bool backward)
{
	u64 head = perf_mmap__read_head(md);
	u64 old = md->prev;
	u64 end = head, start = old;
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	if (rb_find_range(data, md->mask, head,
			  old, &start, &end, backward))
		return -1;

	if (start == end)
		return 0;

	rec->samples++;

	size = end - start;
	if (size > (unsigned long)(md->mask) + 1) {
		WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");

		md->prev = head;
		perf_mmap__consume(md, overwrite || backward);
		return 0;
	}

	if ((start & md->mask) + size != (end & md->mask)) {
		buf = &data[start & md->mask];
		size = md->mask + 1 - (start & md->mask);
		start += size;

		if (record__write(rec, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[start & md->mask];
	size = end - start;
	start += size;

	if (record__write(rec, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = head;
	perf_mmap__consume(md, overwrite || backward);
out:
	return rc;
}

static volatile int done;
static volatile int signr = -1;
static volatile int child_finished;

static volatile int auxtrace_record__snapshot_started;
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
static DEFINE_TRIGGER(switch_output_trigger);

static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
}

static void sigsegv_handler(int sig)
{
	perf_hooks__recover();
	sighandler_dump_stack(sig);
}

static void record__sig_exit(void)
{
	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	raise(signr);
}

#ifdef HAVE_AUXTRACE_SUPPORT

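/*
 * Callback for auxtrace_mmap__read(): writes an AUX area trace event and its
 * payload (possibly split in two chunks by the ring buffer wrap) to the
 * output, padded to an 8-byte boundary. For non-pipe output the event's file
 * offset is also recorded in the auxtrace index.
 */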
static int record__process_auxtrace(struct perf_tool *tool,
				    union perf_event *event, void *data1,
				    size_t len1, void *data2, size_t len2)
{
	struct record *rec = container_of(tool, struct record, tool);
	struct perf_data_file *file = &rec->file;
	size_t padding;
	u8 pad[8] = {0};

	if (!perf_data_file__is_pipe(file)) {
		off_t file_offset;
		int fd = perf_data_file__fd(file);
		int err;

		file_offset = lseek(fd, 0, SEEK_CUR);
		if (file_offset == -1)
			return -1;
		err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
						     event, file_offset);
		if (err)
			return err;
	}

	/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
	padding = (len1 + len2) & 7;
	if (padding)
		padding = 8 - padding;

	record__write(rec, event, event->header.size);
	record__write(rec, data1, len1);
	if (len2)
		record__write(rec, data2, len2);
	record__write(rec, &pad, padding);

	return 0;
}

static int record__auxtrace_mmap_read(struct record *rec,
				      struct auxtrace_mmap *mm)
{
	int ret;

	ret = auxtrace_mmap__read(mm, rec->itr, &rec->tool,
				  record__process_auxtrace);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_mmap_read_snapshot(struct record *rec,
					       struct auxtrace_mmap *mm)
{
	int ret;

	ret = auxtrace_mmap__read_snapshot(mm, rec->itr, &rec->tool,
					   record__process_auxtrace,
					   rec->opts.auxtrace_snapshot_size);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_read_snapshot_all(struct record *rec)
{
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		struct auxtrace_mmap *mm =
				&rec->evlist->mmap[i].auxtrace_mmap;

		if (!mm->base)
			continue;

		if (record__auxtrace_mmap_read_snapshot(rec, mm) != 0) {
			rc = -1;
			goto out;
		}
	}
out:
	return rc;
}

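/*
 * Dump a snapshot from every AUX area mmap, then mark the snapshot trigger
 * as ready again or flag it as failed depending on the outcome.
 */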
static void record__read_auxtrace_snapshot(struct record *rec)
{
	pr_debug("Recording AUX area tracing snapshot\n");
	if (record__auxtrace_read_snapshot_all(rec) < 0) {
		trigger_error(&auxtrace_snapshot_trigger);
	} else {
		if (auxtrace_record__snapshot_finish(rec->itr))
			trigger_error(&auxtrace_snapshot_trigger);
		else
			trigger_ready(&auxtrace_snapshot_trigger);
	}
}

#else

static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
			       struct auxtrace_mmap *mm __maybe_unused)
{
	return 0;
}

static inline
void record__read_auxtrace_snapshot(struct record *rec __maybe_unused)
{
}

static inline
int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
{
	return 0;
}

#endif

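/*
 * mmap the ring buffers for all events in the evlist, turning an EPERM from
 * the kernel into a hint about perf_event_mlock_kb.
 */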
static int record__mmap_evlist(struct record *rec,
			       struct perf_evlist *evlist)
{
	struct record_opts *opts = &rec->opts;
	char msg[512];

	if (perf_evlist__mmap_ex(evlist, opts->mmap_pages, false,
				 opts->auxtrace_mmap_pages,
				 opts->auxtrace_snapshot_mode) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u,%u)\n",
			       opts->mmap_pages, opts->auxtrace_mmap_pages);
			return -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno,
			       str_error_r(errno, msg, sizeof(msg)));
			if (errno)
				return -errno;
			else
				return -EINVAL;
		}
	}
	return 0;
}

static int record__mmap(struct record *rec)
{
	return record__mmap_evlist(rec, rec->evlist);
}

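/*
 * Open all events in the evlist (falling back to a weaker configuration when
 * the requested one isn't supported), apply event filters and driver configs,
 * and mmap the ring buffers. On success the evlist is attached to the session.
 */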
static int record__open(struct record *rec)
{
	char msg[512];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	struct perf_evsel_config_term *err_term;
	int rc = 0;

	perf_evlist__config(evlist, opts, &callchain_param);

	evlist__for_each_entry(evlist, pos) {
try_again:
		if (perf_evsel__open(pos, pos->cpus, pos->threads) < 0) {
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}
	}

	if (perf_evlist__apply_filters(evlist, &pos)) {
		error("failed to set filter \"%s\" on event %s with %d (%s)\n",
		      pos->filter, perf_evsel__name(pos), errno,
		      str_error_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	if (perf_evlist__apply_drv_configs(evlist, &pos, &err_term)) {
		error("failed to set config \"%s\" on event %s with %d (%s)\n",
		      err_term->val.drv_cfg, perf_evsel__name(pos), errno,
		      str_error_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	rc = record__mmap(rec);
	if (rc)
		goto out;

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	struct record *rec = container_of(tool, struct record, tool);

	rec->samples++;

	return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
}

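/*
 * Post-process the recorded data to collect build-ids for the DSOs that were
 * hit; with --buildid-all the per-sample processing is skipped, since every
 * DSO gets marked anyway.
 */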
static int process_buildids(struct record *rec)
{
	struct perf_data_file *file = &rec->file;
	struct perf_session *session = rec->session;

	if (file->size == 0)
		return 0;

	/*
	 * During this process, it'll load the kernel map and replace
	 * dso->long_name with the real pathname it found. In this case
	 * we prefer the vmlinux path like
	 *   /lib/modules/3.16.4/build/vmlinux
	 *
	 * rather than the build-id path (in the debug directory):
	 *   $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
	 */
	symbol_conf.ignore_vmlinux_buildid = true;

	/*
	 * If --buildid-all is given, it marks all DSOs regardless of hits,
	 * so there is no need to process samples.
	 */
	if (rec->buildid_all)
		rec->tool.sample = NULL;

	return perf_session__process_events(session);
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * As for the guest kernel, when processing the record & report
	 * subcommands we arrange the module mmaps prior to the guest kernel
	 * mmap and trigger a preload of the dso, because the default guest
	 * module symbols are loaded from guest kallsyms instead of
	 * /lib/modules/XXX/XXX. This method avoids missing symbols when the
	 * first address is in a module instead of in the guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for the guest kernel because the guest kernel's
	 * /proc/kallsyms sometimes has no _text.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

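/*
 * Drain either the regular or the backward ring buffers of the evlist,
 * emitting a PERF_RECORD_FINISHED_ROUND marker if anything was written.
 */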
static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
				    bool backward)
{
	u64 bytes_written = rec->bytes_written;
	int i;
	int rc = 0;
	struct perf_mmap *maps;

	if (!evlist)
		return 0;

	maps = backward ? evlist->backward_mmap : evlist->mmap;
	if (!maps)
		return 0;

	if (backward && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
		return 0;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		struct auxtrace_mmap *mm = &maps[i].auxtrace_mmap;

		if (maps[i].base) {
			if (record__mmap_read(rec, &maps[i],
					      evlist->overwrite, backward) != 0) {
				rc = -1;
				goto out;
			}
		}

		if (mm->base && !rec->opts.auxtrace_snapshot_mode &&
		    record__auxtrace_mmap_read(rec, mm) != 0) {
			rc = -1;
			goto out;
		}
	}

	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));

	if (backward)
		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
out:
	return rc;
}

static int record__mmap_read_all(struct record *rec)
{
	int err;

	err = record__mmap_read_evlist(rec, rec->evlist, false);
	if (err)
		return err;

	return record__mmap_read_evlist(rec, rec->evlist, true);
}

static void record__init_features(struct record *rec)
{
	struct perf_session *session = rec->session;
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&rec->evlist->entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);

	if (!rec->opts.full_auxtrace)
		perf_header__clear_feat(&session->header, HEADER_AUXTRACE);

	perf_header__clear_feat(&session->header, HEADER_STAT);
}

static void
record__finish_output(struct record *rec)
{
	struct perf_data_file *file = &rec->file;
	int fd = perf_data_file__fd(file);

	if (file->is_pipe)
		return;

	rec->session->header.data_size += rec->bytes_written;
	file->size = lseek(perf_data_file__fd(file), 0, SEEK_CUR);

	if (!rec->no_buildid) {
		process_buildids(rec);

		if (rec->buildid_all)
			dsos__hit_all(rec->session);
	}
	perf_session__write_header(rec->session, rec->evlist, fd, true);

	return;
}

static int record__synthesize_workload(struct record *rec, bool tail)
{
	struct {
		struct thread_map map;
		struct thread_map_data map_data;
	} thread_map;

	if (rec->opts.tail_synthesize != tail)
		return 0;

	thread_map.map.nr = 1;
	thread_map.map.map[0].pid = rec->evlist->workload.pid;
	thread_map.map.map[0].comm = NULL;
	return perf_event__synthesize_thread_map(&rec->tool, &thread_map.map,
						 process_synthesized_event,
						 &rec->session->machines.host,
						 rec->opts.sample_address,
						 rec->opts.proc_map_timeout);
}

static int record__synthesize(struct record *rec, bool tail);

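/*
 * Finish the current output file and switch to a new timestamped one; used
 * for --switch-output and, with at_exit set, when a timestamped file name
 * was requested and recording stops.
 */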
static int
record__switch_output(struct record *rec, bool at_exit)
{
	struct perf_data_file *file = &rec->file;
	int fd, err;

	/* Same size as "2015122520103046" */
	char timestamp[] = "InvalidTimestamp";

	record__synthesize(rec, true);
	if (target__none(&rec->opts.target))
		record__synthesize_workload(rec, true);

	rec->samples = 0;
	record__finish_output(rec);
	err = fetch_current_timestamp(timestamp, sizeof(timestamp));
	if (err) {
		pr_err("Failed to get current timestamp\n");
		return -EINVAL;
	}

	fd = perf_data_file__switch(file, timestamp,
				    rec->session->header.data_offset,
				    at_exit);
	if (fd >= 0 && !at_exit) {
		rec->bytes_written = 0;
		rec->session->header.data_size = 0;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
			file->path, timestamp);

	/* Output tracking events */
	if (!at_exit) {
		record__synthesize(rec, false);

		/*
		 * In 'perf record --switch-output' without -a,
		 * record__synthesize() in record__switch_output() won't
		 * generate tracking events because there's no thread_map
		 * in the evlist, which leaves the newly created perf.data
		 * without map and comm information.
		 * Create a fake thread_map and directly call
		 * perf_event__synthesize_thread_map() for those events.
		 */
		if (target__none(&rec->opts.target))
			record__synthesize_workload(rec, false);
	}
	return fd;
}

static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked for it by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}

static void snapshot_sig_handler(int sig);

int __weak
perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
			    struct perf_tool *tool __maybe_unused,
			    perf_event__handler_t process __maybe_unused,
			    struct machine *machine __maybe_unused)
{
	return 0;
}

static const struct perf_event_mmap_page *
perf_evlist__pick_pc(struct perf_evlist *evlist)
{
	if (evlist) {
		if (evlist->mmap && evlist->mmap[0].base)
			return evlist->mmap[0].base;
		if (evlist->backward_mmap && evlist->backward_mmap[0].base)
			return evlist->backward_mmap[0].base;
	}
	return NULL;
}

static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
{
	const struct perf_event_mmap_page *pc;

	pc = perf_evlist__pick_pc(rec->evlist);
	if (pc)
		return pc;
	return NULL;
}

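/*
 * Synthesize the side-band events that consumers of perf.data need to make
 * sense of the samples: attrs and tracing data (pipe mode only), time
 * conversion info, auxtrace info, kernel and module mmaps, guest machines
 * and the already existing threads.
 */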
static int record__synthesize(struct record *rec, bool tail)
{
	struct perf_session *session = rec->session;
	struct machine *machine = &session->machines.host;
	struct perf_data_file *file = &rec->file;
	struct record_opts *opts = &rec->opts;
	struct perf_tool *tool = &rec->tool;
	int fd = perf_data_file__fd(file);
	int err = 0;

	if (rec->opts.tail_synthesize != tail)
		return 0;

	if (file->is_pipe) {
		err = perf_event__synthesize_attrs(tool, session,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out;
		}

		if (have_tracepoints(&rec->evlist->entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so it's not really
			 * an error, just that we don't need to
			 * synthesize anything. We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out;
			}
			rec->bytes_written += err;
		}
	}

	err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
					  process_synthesized_event, machine);
	if (err)
		goto out;

	if (rec->opts.full_auxtrace) {
		err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
					session, process_synthesized_event);
		if (err)
			goto out;
	}

	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
			   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
			   "Check /proc/kallsyms permission or run as root.\n");

	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
			   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
			   "Check /proc/modules permission or run as root.\n");

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
					    process_synthesized_event, opts->sample_address,
					    opts->proc_map_timeout);
out:
	return err;
}

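/*
 * The main recording loop: install the signal handlers, create the session,
 * write the header, synthesize the startup events, kick off the workload and
 * then poll the ring buffers until we are done.
 */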
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -0300829static int __cmd_record(struct record *rec, int argc, const char **argv)
Peter Zijlstra16c8a102009-05-05 17:50:27 +0200830{
David Ahern57706ab2013-11-06 11:41:34 -0700831 int err;
Namhyung Kim45604712014-05-12 09:47:24 +0900832 int status = 0;
Peter Zijlstra8b412662009-09-17 19:59:05 +0200833 unsigned long waking = 0;
Zhang, Yanmin46be6042010-03-18 11:36:04 -0300834 const bool forks = argc > 0;
Arnaldo Carvalho de Melo23346f22010-04-27 21:17:50 -0300835 struct machine *machine;
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -0200836 struct perf_tool *tool = &rec->tool;
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -0300837 struct record_opts *opts = &rec->opts;
Jiri Olsaf5fc14122013-10-15 16:27:32 +0200838 struct perf_data_file *file = &rec->file;
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -0200839 struct perf_session *session;
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -0300840 bool disabled = false, draining = false;
Namhyung Kim42aa2762015-01-29 17:06:48 +0900841 int fd;
Peter Zijlstrade9ac072009-04-08 15:01:31 +0200842
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -0200843 rec->progname = argv[0];
Andi Kleen33e49ea2011-09-15 14:31:40 -0700844
Namhyung Kim45604712014-05-12 09:47:24 +0900845 atexit(record__sig_exit);
Peter Zijlstraf5970552009-06-18 23:22:55 +0200846 signal(SIGCHLD, sig_handler);
847 signal(SIGINT, sig_handler);
David Ahern804f7ac2013-05-06 12:24:23 -0600848 signal(SIGTERM, sig_handler);
Wang Nana0748652016-11-26 07:03:28 +0000849 signal(SIGSEGV, sigsegv_handler);
Wang Nanc0bdc1c2016-04-13 08:21:06 +0000850
Jiri Olsa1b43b702017-01-09 10:51:56 +0100851 if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.signal) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300852 signal(SIGUSR2, snapshot_sig_handler);
Wang Nan3c1cb7e2016-04-20 18:59:50 +0000853 if (rec->opts.auxtrace_snapshot_mode)
854 trigger_on(&auxtrace_snapshot_trigger);
Jiri Olsa1b43b702017-01-09 10:51:56 +0100855 if (rec->switch_output.signal)
Wang Nan3c1cb7e2016-04-20 18:59:50 +0000856 trigger_on(&switch_output_trigger);
Wang Nanc0bdc1c2016-04-13 08:21:06 +0000857 } else {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300858 signal(SIGUSR2, SIG_IGN);
Wang Nanc0bdc1c2016-04-13 08:21:06 +0000859 }
Peter Zijlstraf5970552009-06-18 23:22:55 +0200860
Arnaldo Carvalho de Melob7b61cb2015-03-03 11:58:45 -0300861 session = perf_session__new(file, false, tool);
Arnaldo Carvalho de Melo94c744b2009-12-11 21:24:02 -0200862 if (session == NULL) {
Adrien BAKffa91882014-04-18 11:00:43 +0900863 pr_err("Perf session creation failed.\n");
Arnaldo Carvalho de Meloa9a70bb2009-11-17 01:18:11 -0200864 return -1;
865 }
866
Namhyung Kim42aa2762015-01-29 17:06:48 +0900867 fd = perf_data_file__fd(file);
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -0200868 rec->session = session;
869
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -0300870 record__init_features(rec);
Stephane Eranian330aa672012-03-08 23:47:46 +0100871
Arnaldo Carvalho de Melod4db3f12009-12-27 21:36:57 -0200872 if (forks) {
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -0300873 err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
Jiri Olsaf5fc14122013-10-15 16:27:32 +0200874 argv, file->is_pipe,
Arnaldo Carvalho de Melo735f7e02014-01-03 14:56:49 -0300875 workload_exec_failed_signal);
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -0200876 if (err < 0) {
877 pr_err("Couldn't run the workload!\n");
Namhyung Kim45604712014-05-12 09:47:24 +0900878 status = err;
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -0200879 goto out_delete_session;
Jens Axboe0a5ac842009-08-12 11:18:01 +0200880 }
Peter Zijlstra856e9662009-12-16 17:55:55 +0100881 }
882
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -0300883 if (record__open(rec) != 0) {
David Ahern8d3eca22012-08-26 12:24:47 -0600884 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +0900885 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -0600886 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +0200887
Wang Nan8690a2a2016-02-22 09:10:32 +0000888 err = bpf__apply_obj_config();
889 if (err) {
890 char errbuf[BUFSIZ];
891
892 bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
893 pr_err("ERROR: Apply config to BPF failed: %s\n",
894 errbuf);
895 goto out_child;
896 }
897
Adrian Huntercca84822015-08-19 17:29:21 +0300898 /*
899 * Normally perf_session__new would do this, but it doesn't have the
900 * evlist.
901 */
902 if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
903 pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
904 rec->tool.ordered_events = false;
905 }
906
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -0300907 if (!rec->evlist->nr_groups)
Namhyung Kima8bb5592013-01-22 18:09:31 +0900908 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
909
Jiri Olsaf5fc14122013-10-15 16:27:32 +0200910 if (file->is_pipe) {
Namhyung Kim42aa2762015-01-29 17:06:48 +0900911 err = perf_header__write_pipe(fd);
Tom Zanussi529870e2010-04-01 23:59:16 -0500912 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +0900913 goto out_child;
Jiri Olsa563aecb2013-06-05 13:35:06 +0200914 } else {
Namhyung Kim42aa2762015-01-29 17:06:48 +0900915 err = perf_session__write_header(session, rec->evlist, fd, false);
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -0200916 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +0900917 goto out_child;
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -0200918 }
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +0200919
David Ahernd3665492012-02-06 15:27:52 -0700920 if (!rec->no_buildid
Robert Richtere20960c2011-12-07 10:02:55 +0100921 && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
David Ahernd3665492012-02-06 15:27:52 -0700922 pr_err("Couldn't generate buildids. "
Robert Richtere20960c2011-12-07 10:02:55 +0100923 "Use --no-buildid to profile anyway.\n");
David Ahern8d3eca22012-08-26 12:24:47 -0600924 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +0900925 goto out_child;
Robert Richtere20960c2011-12-07 10:02:55 +0100926 }
927
Arnaldo Carvalho de Melo34ba5122012-12-19 09:04:24 -0300928 machine = &session->machines.host;
Arnaldo Carvalho de Melo743eb862011-11-28 07:56:39 -0200929
Wang Nan4ea648a2016-07-14 08:34:47 +0000930 err = record__synthesize(rec, false);
Wang Nanc45c86e2016-02-26 09:32:07 +0000931 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +0900932 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -0600933
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -0200934 if (rec->realtime_prio) {
Peter Zijlstrade9ac072009-04-08 15:01:31 +0200935 struct sched_param param;
936
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -0200937 param.sched_priority = rec->realtime_prio;
Peter Zijlstrade9ac072009-04-08 15:01:31 +0200938 if (sched_setscheduler(0, SCHED_FIFO, &param)) {
Arnaldo Carvalho de Melo6beba7a2009-10-21 17:34:06 -0200939 pr_err("Could not set realtime priority.\n");
David Ahern8d3eca22012-08-26 12:24:47 -0600940 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +0900941 goto out_child;
Peter Zijlstrade9ac072009-04-08 15:01:31 +0200942 }
943 }
944
Jiri Olsa774cb492012-11-12 18:34:01 +0100945 /*
946 * When perf is starting the traced process, all the events
947 * (apart from group members) have enable_on_exec=1 set,
948 * so don't spoil it by prematurely enabling them.
949 */
Andi Kleen6619a532014-01-11 13:38:27 -0800950 if (!target__none(&opts->target) && !opts->initial_delay)
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -0300951 perf_evlist__enable(rec->evlist);
David Ahern764e16a32011-08-25 10:17:55 -0600952
Peter Zijlstra856e9662009-12-16 17:55:55 +0100953 /*
954 * Let the child rip
955 */
Namhyung Kime803cf92015-09-22 09:24:55 +0900956 if (forks) {
Namhyung Kime5bed562015-09-30 10:45:24 +0900957 union perf_event *event;
958
959 event = malloc(sizeof(event->comm) + machine->id_hdr_size);
960 if (event == NULL) {
961 err = -ENOMEM;
962 goto out_child;
963 }
964
Namhyung Kime803cf92015-09-22 09:24:55 +0900965 /*
966 * Some H/W events are generated before COMM event
967 * which is emitted during exec(), so perf script
968 * cannot see a correct process name for those events.
969 * Synthesize COMM event to prevent it.
970 */
Namhyung Kime5bed562015-09-30 10:45:24 +0900971 perf_event__synthesize_comm(tool, event,
Namhyung Kime803cf92015-09-22 09:24:55 +0900972 rec->evlist->workload.pid,
973 process_synthesized_event,
974 machine);
Namhyung Kime5bed562015-09-30 10:45:24 +0900975 free(event);
Namhyung Kime803cf92015-09-22 09:24:55 +0900976
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -0300977 perf_evlist__start_workload(rec->evlist);
Namhyung Kime803cf92015-09-22 09:24:55 +0900978 }
Peter Zijlstra856e9662009-12-16 17:55:55 +0100979
Andi Kleen6619a532014-01-11 13:38:27 -0800980 if (opts->initial_delay) {
Arnaldo Carvalho de Melo0693e682016-08-08 15:05:46 -0300981 usleep(opts->initial_delay * USEC_PER_MSEC);
Andi Kleen6619a532014-01-11 13:38:27 -0800982 perf_evlist__enable(rec->evlist);
983 }
984
Wang Nan5f9cf592016-04-20 18:59:49 +0000985 trigger_ready(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +0000986 trigger_ready(&switch_output_trigger);
Wang Nana0748652016-11-26 07:03:28 +0000987 perf_hooks__invoke_record_start();
Peter Zijlstra649c48a2009-06-24 21:12:48 +0200988 for (;;) {
Yang Shi9f065192015-09-29 14:49:43 -0700989 unsigned long long hits = rec->samples;
Peter Zijlstrade9ac072009-04-08 15:01:31 +0200990
Wang Nan057374642016-07-14 08:34:43 +0000991 /*
992 * rec->evlist->bkw_mmap_state is possible to be
993 * BKW_MMAP_EMPTY here: when done == true and
994 * hits != rec->samples in previous round.
995 *
996 * perf_evlist__toggle_bkw_mmap ensure we never
997 * convert BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
998 */
999 if (trigger_is_hit(&switch_output_trigger) || done || draining)
1000 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);
1001
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001002 if (record__mmap_read_all(rec) < 0) {
Wang Nan5f9cf592016-04-20 18:59:49 +00001003 trigger_error(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001004 trigger_error(&switch_output_trigger);
David Ahern8d3eca22012-08-26 12:24:47 -06001005 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001006 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001007 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001008
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001009 if (auxtrace_record__snapshot_started) {
1010 auxtrace_record__snapshot_started = 0;
Wang Nan5f9cf592016-04-20 18:59:49 +00001011 if (!trigger_is_error(&auxtrace_snapshot_trigger))
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001012 record__read_auxtrace_snapshot(rec);
Wang Nan5f9cf592016-04-20 18:59:49 +00001013 if (trigger_is_error(&auxtrace_snapshot_trigger)) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001014 pr_err("AUX area tracing snapshot failed\n");
1015 err = -1;
1016 goto out_child;
1017 }
1018 }
1019
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001020 if (trigger_is_hit(&switch_output_trigger)) {
Wang Nan057374642016-07-14 08:34:43 +00001021 /*
1022 * If switch_output_trigger is hit, the data in
1023 * overwritable ring buffer should have been collected,
1024 * so bkw_mmap_state should be set to BKW_MMAP_EMPTY.
1025 *
1026 * If SIGUSR2 raise after or during record__mmap_read_all(),
1027 * record__mmap_read_all() didn't collect data from
1028 * overwritable ring buffer. Read again.
1029 */
1030 if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
1031 continue;
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001032 trigger_ready(&switch_output_trigger);
1033
Wang Nan057374642016-07-14 08:34:43 +00001034 /*
1035 * Reenable events in overwrite ring buffer after
1036 * record__mmap_read_all(): we should have collected
1037 * data from it.
1038 */
1039 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);
1040
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001041 if (!quiet)
1042 fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
1043 waking);
1044 waking = 0;
1045 fd = record__switch_output(rec, false);
1046 if (fd < 0) {
1047 pr_err("Failed to switch to new file\n");
1048 trigger_error(&switch_output_trigger);
1049 err = fd;
1050 goto out_child;
1051 }
1052 }
1053
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001054 if (hits == rec->samples) {
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001055 if (done || draining)
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001056 break;
Arnaldo Carvalho de Melof66a8892014-08-18 17:25:59 -03001057 err = perf_evlist__poll(rec->evlist, -1);
Jiri Olsaa5151142014-06-02 13:44:23 -04001058 /*
1059 * Propagate error, only if there's any. Ignore positive
1060 * number of returned events and interrupt error.
1061 */
1062 if (err > 0 || (err < 0 && errno == EINTR))
Namhyung Kim45604712014-05-12 09:47:24 +09001063 err = 0;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001064 waking++;
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001065
1066 if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
1067 draining = true;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001068 }
1069
Jiri Olsa774cb492012-11-12 18:34:01 +01001070 /*
1071 * When perf is starting the traced process, at the end events
1072 * die with the process and we wait for that. Thus no need to
1073 * disable events in this case.
1074 */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001075 if (done && !disabled && !target__none(&opts->target)) {
Wang Nan5f9cf592016-04-20 18:59:49 +00001076 trigger_off(&auxtrace_snapshot_trigger);
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001077 perf_evlist__disable(rec->evlist);
Jiri Olsa27119262012-11-12 18:34:02 +01001078 disabled = true;
1079 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001080 }
Wang Nan5f9cf592016-04-20 18:59:49 +00001081 trigger_off(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001082 trigger_off(&switch_output_trigger);
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001083
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001084 if (forks && workload_exec_errno) {
Masami Hiramatsu35550da2014-08-14 02:22:43 +00001085 char msg[STRERR_BUFSIZE];
Arnaldo Carvalho de Meloc8b5f2c2016-07-06 11:56:20 -03001086 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001087 pr_err("Workload failed: %s\n", emsg);
1088 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001089 goto out_child;
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001090 }
1091
Namhyung Kime3d59112015-01-29 17:06:44 +09001092 if (!quiet)
Namhyung Kim45604712014-05-12 09:47:24 +09001093 fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02001094
Wang Nan4ea648a2016-07-14 08:34:47 +00001095 if (target__none(&rec->opts.target))
1096 record__synthesize_workload(rec, true);
1097
Namhyung Kim45604712014-05-12 09:47:24 +09001098out_child:
1099 if (forks) {
1100 int exit_status;
Ingo Molnaraddc2782009-06-02 23:43:11 +02001101
Namhyung Kim45604712014-05-12 09:47:24 +09001102 if (!child_finished)
1103 kill(rec->evlist->workload.pid, SIGTERM);
1104
1105 wait(&exit_status);
1106
1107 if (err < 0)
1108 status = err;
1109 else if (WIFEXITED(exit_status))
1110 status = WEXITSTATUS(exit_status);
1111 else if (WIFSIGNALED(exit_status))
1112 signr = WTERMSIG(exit_status);
1113 } else
1114 status = err;
1115
Wang Nan4ea648a2016-07-14 08:34:47 +00001116 record__synthesize(rec, true);
Namhyung Kime3d59112015-01-29 17:06:44 +09001117 /* this will be recalculated during process_buildids() */
1118 rec->samples = 0;
1119
Wang Nanecfd7a92016-04-13 08:21:07 +00001120 if (!err) {
1121 if (!rec->timestamp_filename) {
1122 record__finish_output(rec);
1123 } else {
1124 fd = record__switch_output(rec, true);
1125 if (fd < 0) {
1126 status = fd;
1127 goto out_delete_session;
1128 }
1129 }
1130 }
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001131
Wang Nana0748652016-11-26 07:03:28 +00001132 perf_hooks__invoke_record_end();
1133
Namhyung Kime3d59112015-01-29 17:06:44 +09001134 if (!err && !quiet) {
1135 char samples[128];
Wang Nanecfd7a92016-04-13 08:21:07 +00001136 const char *postfix = rec->timestamp_filename ?
1137 ".<timestamp>" : "";
Namhyung Kime3d59112015-01-29 17:06:44 +09001138
Adrian Hunteref149c22015-04-09 18:53:45 +03001139 if (rec->samples && !rec->opts.full_auxtrace)
Namhyung Kime3d59112015-01-29 17:06:44 +09001140 scnprintf(samples, sizeof(samples),
1141 " (%" PRIu64 " samples)", rec->samples);
1142 else
1143 samples[0] = '\0';
1144
Wang Nanecfd7a92016-04-13 08:21:07 +00001145 fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s ]\n",
Namhyung Kime3d59112015-01-29 17:06:44 +09001146 perf_data_file__size(file) / 1024.0 / 1024.0,
Wang Nanecfd7a92016-04-13 08:21:07 +00001147 file->path, postfix, samples);
Namhyung Kime3d59112015-01-29 17:06:44 +09001148 }
1149
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001150out_delete_session:
1151 perf_session__delete(session);
Namhyung Kim45604712014-05-12 09:47:24 +09001152 return status;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001153}
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001154
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001155static void callchain_debug(struct callchain_param *callchain)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001156{
Kan Liangaad2b212015-01-05 13:23:04 -05001157 static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
Jiri Olsaa601fdf2014-02-03 12:44:43 +01001158
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001159 pr_debug("callchain: type %s\n", str[callchain->record_mode]);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001160
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001161 if (callchain->record_mode == CALLCHAIN_DWARF)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001162 pr_debug("callchain: stack dump size %d\n",
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001163 callchain->dump_size);
1164}
1165
1166int record_opts__parse_callchain(struct record_opts *record,
1167 struct callchain_param *callchain,
1168 const char *arg, bool unset)
1169{
1170 int ret;
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001171 callchain->enabled = !unset;
1172
1173 /* --no-call-graph */
1174 if (unset) {
1175 callchain->record_mode = CALLCHAIN_NONE;
1176 pr_debug("callchain: disabled\n");
1177 return 0;
1178 }
1179
1180 ret = parse_callchain_record_opt(arg, callchain);
1181 if (!ret) {
1182 /* Enable data address sampling for DWARF unwind. */
1183 if (callchain->record_mode == CALLCHAIN_DWARF)
1184 record->sample_address = true;
1185 callchain_debug(callchain);
1186 }
1187
1188 return ret;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001189}
1190
Kan Liangc421e802015-07-29 05:42:12 -04001191int record_parse_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001192 const char *arg,
1193 int unset)
1194{
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001195 return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
Jiri Olsa26d33022012-08-07 15:20:47 +02001196}
1197
Kan Liangc421e802015-07-29 05:42:12 -04001198int record_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001199 const char *arg __maybe_unused,
1200 int unset __maybe_unused)
1201{
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001202 struct callchain_param *callchain = opt->value;
Kan Liangc421e802015-07-29 05:42:12 -04001203
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001204 callchain->enabled = true;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001205
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001206 if (callchain->record_mode == CALLCHAIN_NONE)
1207 callchain->record_mode = CALLCHAIN_FP;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001208
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001209 callchain_debug(callchain);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001210 return 0;
1211}
1212
Jiri Olsaeb853e82014-02-03 12:44:42 +01001213static int perf_record_config(const char *var, const char *value, void *cb)
1214{
Namhyung Kim7a29c082015-12-15 10:49:56 +09001215 struct record *rec = cb;
1216
1217 if (!strcmp(var, "record.build-id")) {
1218 if (!strcmp(value, "cache"))
1219 rec->no_buildid_cache = false;
1220 else if (!strcmp(value, "no-cache"))
1221 rec->no_buildid_cache = true;
1222 else if (!strcmp(value, "skip"))
1223 rec->no_buildid = true;
1224 else
1225 return -1;
1226 return 0;
1227 }
Jiri Olsaeb853e82014-02-03 12:44:42 +01001228 if (!strcmp(var, "record.call-graph"))
Namhyung Kim5a2e5e82014-09-23 10:01:44 +09001229 var = "call-graph.record-mode"; /* fall-through */
Jiri Olsaeb853e82014-02-03 12:44:42 +01001230
1231 return perf_default_config(var, value, cb);
1232}
1233
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001234struct clockid_map {
1235 const char *name;
1236 int clockid;
1237};
1238
1239#define CLOCKID_MAP(n, c) \
1240 { .name = n, .clockid = (c), }
1241
1242#define CLOCKID_END { .name = NULL, }
1243
1244
1245/*
1246 * Add the missing ones, we need to build on many distros...
1247 */
1248#ifndef CLOCK_MONOTONIC_RAW
1249#define CLOCK_MONOTONIC_RAW 4
1250#endif
1251#ifndef CLOCK_BOOTTIME
1252#define CLOCK_BOOTTIME 7
1253#endif
1254#ifndef CLOCK_TAI
1255#define CLOCK_TAI 11
1256#endif
1257
1258static const struct clockid_map clockids[] = {
1259 /* available for all events, NMI safe */
1260 CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
1261 CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),
1262
1263 /* available for some events */
1264 CLOCKID_MAP("realtime", CLOCK_REALTIME),
1265 CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
1266 CLOCKID_MAP("tai", CLOCK_TAI),
1267
1268 /* available for the lazy */
1269 CLOCKID_MAP("mono", CLOCK_MONOTONIC),
1270 CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
1271 CLOCKID_MAP("real", CLOCK_REALTIME),
1272 CLOCKID_MAP("boot", CLOCK_BOOTTIME),
1273
1274 CLOCKID_END,
1275};
1276
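/*
 * Option callback for -k/--clockid: accepts a raw clockid number or a
 * case-insensitive name from clockids[], with or without the "CLOCK_"
 * prefix, e.g. -k mono or -k CLOCK_MONOTONIC_RAW.
 */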
1277static int parse_clockid(const struct option *opt, const char *str, int unset)
1278{
1279 struct record_opts *opts = (struct record_opts *)opt->value;
1280 const struct clockid_map *cm;
1281 const char *ostr = str;
1282
1283 if (unset) {
1284 opts->use_clockid = 0;
1285 return 0;
1286 }
1287
1288 /* no arg passed */
1289 if (!str)
1290 return 0;
1291
1292 /* no setting it twice */
1293 if (opts->use_clockid)
1294 return -1;
1295
1296 opts->use_clockid = true;
1297
1298	/* if it's a number, we're done */
1299 if (sscanf(str, "%d", &opts->clockid) == 1)
1300 return 0;
1301
1302 /* allow a "CLOCK_" prefix to the name */
1303 if (!strncasecmp(str, "CLOCK_", 6))
1304 str += 6;
1305
1306 for (cm = clockids; cm->name; cm++) {
1307 if (!strcasecmp(str, cm->name)) {
1308 opts->clockid = cm->clockid;
1309 return 0;
1310 }
1311 }
1312
1313 opts->use_clockid = false;
1314 ui__warning("unknown clockid %s, check man page\n", ostr);
1315 return -1;
1316}
1317
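/*
 * Option callback for -m/--mmap-pages: "pages[,pages]", where the optional
 * second value sizes the AUX area tracing mmap, e.g. -m 512,128
 * (illustrative page counts).
 */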
Adrian Huntere9db1312015-04-09 18:53:46 +03001318static int record__parse_mmap_pages(const struct option *opt,
1319 const char *str,
1320 int unset __maybe_unused)
1321{
1322 struct record_opts *opts = opt->value;
1323 char *s, *p;
1324 unsigned int mmap_pages;
1325 int ret;
1326
1327 if (!str)
1328 return -EINVAL;
1329
1330 s = strdup(str);
1331 if (!s)
1332 return -ENOMEM;
1333
1334 p = strchr(s, ',');
1335 if (p)
1336 *p = '\0';
1337
1338 if (*s) {
1339 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
1340 if (ret)
1341 goto out_free;
1342 opts->mmap_pages = mmap_pages;
1343 }
1344
1345 if (!p) {
1346 ret = 0;
1347 goto out_free;
1348 }
1349
1350 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
1351 if (ret)
1352 goto out_free;
1353
1354 opts->auxtrace_mmap_pages = mmap_pages;
1355
1356out_free:
1357 free(s);
1358 return ret;
1359}
1360
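/*
 * Validate the --switch-output argument; only "signal" is understood so far,
 * i.e. rotate the output file when SIGUSR2 is received (e.g. via
 * kill -USR2 <pid of perf record>).
 */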
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001361static int switch_output_setup(struct record *rec)
1362{
1363 struct switch_output *s = &rec->switch_output;
1364
1365 if (!s->set)
1366 return 0;
1367
1368 if (!strcmp(s->str, "signal")) {
1369 s->signal = true;
1370 pr_debug("switch-output with SIGUSR2 signal\n");
1371 return 0;
1372 }
1373
1374 return -1;
1375}
1376
Namhyung Kime5b2c202014-10-23 00:15:46 +09001377static const char * const __record_usage[] = {
Mike Galbraith9e0967532009-05-28 16:25:34 +02001378 "perf record [<options>] [<command>]",
1379 "perf record [<options>] -- <command> [<options>]",
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001380 NULL
1381};
Namhyung Kime5b2c202014-10-23 00:15:46 +09001382const char * const *record_usage = __record_usage;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001383
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001384/*
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001385 * XXX Ideally would be local to cmd_record() and passed to a record__new
1386 * because we need to have access to it in record__exit, which is called
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001387 * after cmd_record() exits, but since record_options need to be accessible to
1388 * builtin-script, leave it here.
1389 *
1390 * At least we don't touch it in all the other functions here directly.
1391 *
1392 * Just say no to tons of global variables, sigh.
1393 */
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001394static struct record record = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001395 .opts = {
Andi Kleen8affc2b2014-07-31 14:45:04 +08001396 .sample_time = true,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001397 .mmap_pages = UINT_MAX,
1398 .user_freq = UINT_MAX,
1399 .user_interval = ULLONG_MAX,
Arnaldo Carvalho de Melo447a6012012-05-22 13:14:18 -03001400 .freq = 4000,
Namhyung Kimd1cb9fc2012-05-16 18:45:49 +09001401 .target = {
1402 .uses_mmap = true,
Adrian Hunter3aa59392013-11-15 15:52:29 +02001403 .default_per_cpu = true,
Namhyung Kimd1cb9fc2012-05-16 18:45:49 +09001404 },
Kan Liang9d9cad72015-06-17 09:51:11 -04001405 .proc_map_timeout = 500,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001406 },
Namhyung Kime3d59112015-01-29 17:06:44 +09001407 .tool = {
1408 .sample = process_sample_event,
1409 .fork = perf_event__process_fork,
Adrian Huntercca84822015-08-19 17:29:21 +03001410 .exit = perf_event__process_exit,
Namhyung Kime3d59112015-01-29 17:06:44 +09001411 .comm = perf_event__process_comm,
1412 .mmap = perf_event__process_mmap,
1413 .mmap2 = perf_event__process_mmap2,
Adrian Huntercca84822015-08-19 17:29:21 +03001414 .ordered_events = true,
Namhyung Kime3d59112015-01-29 17:06:44 +09001415 },
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001416};
Frederic Weisbecker7865e812010-04-14 19:42:07 +02001417
Namhyung Kim76a26542015-10-22 23:28:32 +09001418const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
1419 "\n\t\t\t\tDefault: fp";
Arnaldo Carvalho de Melo61eaa3b2012-10-01 15:20:58 -03001420
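/* Set by --dry-run: parse and validate options, then exit without recording. */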
Wang Nan0aab2132016-06-16 08:02:41 +00001421static bool dry_run;
1422
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001423/*
1424 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
1425 * with it and switch to using the library functions in perf_evlist that came
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03001426 * from builtin-record.c, i.e. use record_opts,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001427 * perf_evlist__prepare_workload, etc. instead of fork+exec'ing 'perf record',
1428 * using pipes, etc.
1429 */
Jiri Olsaefd21302017-01-03 09:19:55 +01001430static struct option __record_options[] = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001431 OPT_CALLBACK('e', "event", &record.evlist, "event",
Thomas Gleixner86847b62009-06-06 12:24:17 +02001432 "event selector. use 'perf list' to list available events",
Jiri Olsaf120f9d2011-07-14 11:25:32 +02001433 parse_events_option),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001434 OPT_CALLBACK(0, "filter", &record.evlist, "filter",
Li Zefanc171b552009-10-15 11:22:07 +08001435 "event filter", parse_filter),
Wang Nan4ba1faa2015-07-10 07:36:10 +00001436 OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
1437 NULL, "don't record events from perf itself",
1438 exclude_perf),
Namhyung Kimbea03402012-04-26 14:15:15 +09001439 OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03001440 "record events on existing process id"),
Namhyung Kimbea03402012-04-26 14:15:15 +09001441 OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03001442 "record events on existing thread id"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001443 OPT_INTEGER('r', "realtime", &record.realtime_prio,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001444 "collect data with this RT SCHED_FIFO priority"),
Arnaldo Carvalho de Melo509051e2014-01-14 17:52:14 -03001445 OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
Kirill Smelkovacac03f2011-01-12 17:59:36 +03001446 "collect data without buffering"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001447 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
Frederic Weisbeckerdaac07b2009-08-13 10:27:19 +02001448 "collect raw sample records from all opened counters"),
Namhyung Kimbea03402012-04-26 14:15:15 +09001449 OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001450 "system-wide collection from all CPUs"),
Namhyung Kimbea03402012-04-26 14:15:15 +09001451 OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
Stephane Eranianc45c6ea2010-05-28 12:00:01 +02001452 "list of cpus to monitor"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001453 OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
Jiri Olsaf5fc14122013-10-15 16:27:32 +02001454 OPT_STRING('o', "output", &record.file.path, "file",
Ingo Molnarabaff322009-06-02 22:59:57 +02001455 "output file name"),
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02001456 OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
1457 &record.opts.no_inherit_set,
1458 "child tasks do not inherit counters"),
Wang Nan4ea648a2016-07-14 08:34:47 +00001459 OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
1460 "synthesize non-sample events at the end of output"),
Wang Nan626a6b72016-07-14 08:34:45 +00001461 OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001462 OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
Adrian Huntere9db1312015-04-09 18:53:46 +03001463 OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
1464 "number of mmap data pages and AUX area tracing mmap pages",
1465 record__parse_mmap_pages),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001466 OPT_BOOLEAN(0, "group", &record.opts.group,
Lin Ming43bece72011-08-17 18:42:07 +08001467 "put the counters into a counter group"),
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001468 OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001469 NULL, "enables call-graph recording" ,
1470 &record_callchain_opt),
1471 OPT_CALLBACK(0, "call-graph", &record.opts,
Namhyung Kim76a26542015-10-22 23:28:32 +09001472 "record_mode[,record_size]", record_callchain_help,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001473 &record_parse_callchain_opt),
Ian Munsiec0555642010-04-13 18:37:33 +10001474 OPT_INCR('v', "verbose", &verbose,
Ingo Molnar3da297a2009-06-07 17:39:02 +02001475 "be more verbose (show counter open errors, etc)"),
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02001476 OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001477 OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001478 "per thread counts"),
Peter Zijlstra56100322015-06-10 16:48:50 +02001479 OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
Jiri Olsab6f35ed2016-08-01 20:02:35 +02001480 OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
Adrian Hunter3abebc52015-07-06 14:51:01 +03001481 OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
1482 &record.opts.sample_time_set,
1483 "Record the sample timestamps"),
Peter Zijlstra56100322015-06-10 16:48:50 +02001484 OPT_BOOLEAN('P', "period", &record.opts.period, "Record the sample period"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001485 OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001486 "don't sample"),
Wang Nand2db9a92016-01-25 09:56:19 +00001487 OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
1488 &record.no_buildid_cache_set,
1489 "do not update the buildid cache"),
1490 OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
1491 &record.no_buildid_set,
1492 "do not collect buildids in perf.data"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001493 OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
Stephane Eranian023695d2011-02-14 11:20:01 +02001494 "monitor event in cgroup name only",
1495 parse_cgroups),
Arnaldo Carvalho de Meloa6205a32014-01-14 17:58:12 -03001496 OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
Andi Kleen6619a532014-01-11 13:38:27 -08001497 "ms to wait before starting measurement after program start"),
Namhyung Kimbea03402012-04-26 14:15:15 +09001498 OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
1499 "user to profile"),
Stephane Eraniana5aabda2012-03-08 23:47:45 +01001500
1501 OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
1502 "branch any", "sample any taken branches",
1503 parse_branch_stack),
1504
1505 OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
1506 "branch filter mask", "branch stack filter modes",
Roberto Agostino Vitillobdfebd82012-02-09 23:21:02 +01001507 parse_branch_stack),
Andi Kleen05484292013-01-24 16:10:29 +01001508 OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
1509 "sample by weight (on special events only)"),
Andi Kleen475eeab2013-09-20 07:40:43 -07001510 OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
1511 "sample transaction flags (special events only)"),
Adrian Hunter3aa59392013-11-15 15:52:29 +02001512 OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
1513 "use per-thread mmaps"),
Stephane Eranianbcc84ec2015-08-31 18:41:12 +02001514 OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
1515 "sample selected machine registers on interrupt,"
1516 " use -I ? to list register names", parse_regs),
Andi Kleen85c273d2015-02-24 15:13:40 -08001517 OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
1518 "Record running/enabled time of read (:S) events"),
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001519 OPT_CALLBACK('k', "clockid", &record.opts,
1520 "clockid", "clockid to use for events, see clock_gettime()",
1521 parse_clockid),
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001522 OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
1523 "opts", "AUX area tracing Snapshot Mode", ""),
Kan Liang9d9cad72015-06-17 09:51:11 -04001524 OPT_UINTEGER(0, "proc-map-timeout", &record.opts.proc_map_timeout,
1525 "per thread proc mmap processing timeout in ms"),
Adrian Hunterb757bb02015-07-21 12:44:04 +03001526 OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
1527 "Record context switch events"),
Jiri Olsa85723882016-02-15 09:34:31 +01001528 OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
1529 "Configure all used events to run in kernel space.",
1530 PARSE_OPT_EXCLUSIVE),
1531 OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
1532 "Configure all used events to run in user space.",
1533 PARSE_OPT_EXCLUSIVE),
Wang Nan71dc23262015-10-14 12:41:19 +00001534 OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
1535 "clang binary to use for compiling BPF scriptlets"),
1536 OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
1537 "options passed to clang when compiling BPF scriptlets"),
He Kuang7efe0e02015-12-14 10:39:23 +00001538 OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
1539 "file", "vmlinux pathname"),
Namhyung Kim61566812016-01-11 22:37:09 +09001540 OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
1541 "Record build-id of all DSOs regardless of hits"),
Wang Nanecfd7a92016-04-13 08:21:07 +00001542 OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
1543 "append timestamp to output filename"),
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001544 OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
1545 &record.switch_output.set, "signal",
1546			  "Switch output when receiving SIGUSR2", "signal"),
Wang Nan0aab2132016-06-16 08:02:41 +00001547 OPT_BOOLEAN(0, "dry-run", &dry_run,
1548 "Parse options then exit"),
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001549 OPT_END()
1550};
1551
Namhyung Kime5b2c202014-10-23 00:15:46 +09001552struct option *record_options = __record_options;
1553
Irina Tirdea1d037ca2012-09-11 01:15:03 +03001554int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001555{
Adrian Hunteref149c22015-04-09 18:53:45 +03001556 int err;
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001557 struct record *rec = &record;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001558 char errbuf[BUFSIZ];
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001559
Wang Nan48e1cab2015-12-14 10:39:22 +00001560#ifndef HAVE_LIBBPF_SUPPORT
1561# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
1562 set_nobuild('\0', "clang-path", true);
1563 set_nobuild('\0', "clang-opt", true);
1564# undef set_nobuild
1565#endif
1566
He Kuang7efe0e02015-12-14 10:39:23 +00001567#ifndef HAVE_BPF_PROLOGUE
1568# if !defined (HAVE_DWARF_SUPPORT)
1569# define REASON "NO_DWARF=1"
1570# elif !defined (HAVE_LIBBPF_SUPPORT)
1571# define REASON "NO_LIBBPF=1"
1572# else
1573# define REASON "this architecture doesn't support BPF prologue"
1574# endif
1575# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
1576 set_nobuild('\0', "vmlinux", true);
1577# undef set_nobuild
1578# undef REASON
1579#endif
1580
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001581 rec->evlist = perf_evlist__new();
1582 if (rec->evlist == NULL)
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -02001583 return -ENOMEM;
1584
Jiri Olsaeb853e82014-02-03 12:44:42 +01001585 perf_config(perf_record_config, rec);
1586
Tom Zanussibca647a2010-11-10 08:11:30 -06001587 argc = parse_options(argc, argv, record_options, record_usage,
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02001588 PARSE_OPT_STOP_AT_NON_OPTION);
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001589 if (!argc && target__none(&rec->opts.target))
Tom Zanussibca647a2010-11-10 08:11:30 -06001590 usage_with_options(record_usage, record_options);
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001591
Namhyung Kimbea03402012-04-26 14:15:15 +09001592 if (nr_cgroups && !rec->opts.target.system_wide) {
Namhyung Kimc7118362015-10-25 00:49:27 +09001593 usage_with_options_msg(record_usage, record_options,
1594 "cgroup monitoring only available in system-wide mode");
1595
Stephane Eranian023695d2011-02-14 11:20:01 +02001596 }
Adrian Hunterb757bb02015-07-21 12:44:04 +03001597 if (rec->opts.record_switch_events &&
1598 !perf_can_record_switch_events()) {
Namhyung Kimc7118362015-10-25 00:49:27 +09001599 ui__error("kernel does not support recording context switch events\n");
1600 parse_options_usage(record_usage, record_options, "switch-events", 0);
1601 return -EINVAL;
Adrian Hunterb757bb02015-07-21 12:44:04 +03001602 }
Stephane Eranian023695d2011-02-14 11:20:01 +02001603
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001604 if (switch_output_setup(rec)) {
1605 parse_options_usage(record_usage, record_options, "switch-output", 0);
1606 return -EINVAL;
1607 }
1608
Jiri Olsa1b43b702017-01-09 10:51:56 +01001609 if (rec->switch_output.signal)
Wang Naneca857a2016-04-20 18:59:51 +00001610 rec->timestamp_filename = true;
1611
Adrian Hunteref149c22015-04-09 18:53:45 +03001612 if (!rec->itr) {
1613 rec->itr = auxtrace_record__init(rec->evlist, &err);
1614 if (err)
Adrian Hunter5c01ad602016-09-23 17:38:37 +03001615 goto out;
Adrian Hunteref149c22015-04-09 18:53:45 +03001616 }
1617
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001618 err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
1619 rec->opts.auxtrace_snapshot_opts);
1620 if (err)
Adrian Hunter5c01ad602016-09-23 17:38:37 +03001621 goto out;
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001622
Adrian Hunter1b36c032016-09-23 17:38:39 +03001623 /*
1624 * Allow aliases to facilitate the lookup of symbols for address
1625 * filters. Refer to auxtrace_parse_filters().
1626 */
1627 symbol_conf.allow_aliases = true;
1628
1629 symbol__init(NULL);
1630
1631 err = auxtrace_parse_filters(rec->evlist);
1632 if (err)
1633 goto out;
1634
Wang Nan0aab2132016-06-16 08:02:41 +00001635 if (dry_run)
Adrian Hunter5c01ad602016-09-23 17:38:37 +03001636 goto out;
Wang Nan0aab2132016-06-16 08:02:41 +00001637
Wang Nand7888572016-04-08 15:07:24 +00001638 err = bpf__setup_stdout(rec->evlist);
1639 if (err) {
1640 bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
1641 pr_err("ERROR: Setup BPF stdout failed: %s\n",
1642 errbuf);
Adrian Hunter5c01ad602016-09-23 17:38:37 +03001643 goto out;
Wang Nand7888572016-04-08 15:07:24 +00001644 }
1645
Adrian Hunteref149c22015-04-09 18:53:45 +03001646 err = -ENOMEM;
1647
Arnaldo Carvalho de Meloec80fde2011-05-26 09:53:51 -03001648 if (symbol_conf.kptr_restrict)
Arnaldo Carvalho de Melo646aaea2011-05-27 11:00:41 -03001649 pr_warning(
1650"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
1651"check /proc/sys/kernel/kptr_restrict.\n\n"
1652"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
1653"file is not found in the buildid cache or in the vmlinux path.\n\n"
1654"Samples in kernel modules won't be resolved at all.\n\n"
1655"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
1656"even with a suitable vmlinux or kallsyms file.\n\n");
Arnaldo Carvalho de Meloec80fde2011-05-26 09:53:51 -03001657
Wang Nan0c1d46a2016-04-20 18:59:52 +00001658 if (rec->no_buildid_cache || rec->no_buildid) {
Stephane Eraniana1ac1d32010-06-17 11:39:01 +02001659 disable_buildid_cache();
Jiri Olsa1b43b702017-01-09 10:51:56 +01001660 } else if (rec->switch_output.signal) {
Wang Nan0c1d46a2016-04-20 18:59:52 +00001661 /*
1662 * In 'perf record --switch-output', disable buildid
1663 * generation by default to reduce data file switching
1664		 * overhead. Still generate buildids if they are required
1665 * explicitly using
1666 *
Jiri Olsa60437ac2017-01-03 09:19:56 +01001667 * perf record --switch-output --no-no-buildid \
Wang Nan0c1d46a2016-04-20 18:59:52 +00001668 * --no-no-buildid-cache
1669 *
1670 * Following code equals to:
1671 *
1672 * if ((rec->no_buildid || !rec->no_buildid_set) &&
1673 * (rec->no_buildid_cache || !rec->no_buildid_cache_set))
1674 * disable_buildid_cache();
1675 */
1676 bool disable = true;
1677
1678 if (rec->no_buildid_set && !rec->no_buildid)
1679 disable = false;
1680 if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
1681 disable = false;
1682 if (disable) {
1683 rec->no_buildid = true;
1684 rec->no_buildid_cache = true;
1685 disable_buildid_cache();
1686 }
1687 }
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02001688
Wang Nan4ea648a2016-07-14 08:34:47 +00001689 if (record.opts.overwrite)
1690 record.opts.tail_synthesize = true;
1691
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001692 if (rec->evlist->nr_entries == 0 &&
1693 perf_evlist__add_default(rec->evlist) < 0) {
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02001694 pr_err("Not enough memory for event selector list\n");
Adrian Hunter394c01e2016-09-23 17:38:36 +03001695 goto out;
Peter Zijlstrabbd36e52009-06-11 23:11:50 +02001696 }
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001697
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02001698 if (rec->opts.target.tid && !rec->opts.no_inherit_set)
1699 rec->opts.no_inherit = true;
1700
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001701 err = target__validate(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001702 if (err) {
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001703 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001704 ui__warning("%s", errbuf);
1705 }
Namhyung Kim4bd0f2d2012-04-26 14:15:18 +09001706
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001707 err = target__parse_uid(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001708 if (err) {
1709 int saved_errno = errno;
1710
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001711 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Namhyung Kim3780f482012-05-29 13:22:57 +09001712 ui__error("%s", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001713
1714 err = -saved_errno;
Adrian Hunter394c01e2016-09-23 17:38:36 +03001715 goto out;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001716 }
Arnaldo Carvalho de Melo0d37aa32012-01-19 14:08:15 -02001717
Jiri Olsa23dc4f12016-12-12 11:35:43 +01001718 /* Enable ignoring missing threads when -u option is defined. */
1719 rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX;
1720
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001721 err = -ENOMEM;
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001722 if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
Arnaldo Carvalho de Melodd7927f2011-01-12 14:28:51 -02001723 usage_with_options(record_usage, record_options);
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02001724
Adrian Hunteref149c22015-04-09 18:53:45 +03001725 err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
1726 if (err)
Adrian Hunter394c01e2016-09-23 17:38:36 +03001727 goto out;
Adrian Hunteref149c22015-04-09 18:53:45 +03001728
Namhyung Kim61566812016-01-11 22:37:09 +09001729 /*
1730	 * We take all buildids when the file contains
1731	 * AUX area tracing data, because we do not decode the
1732	 * trace; doing so would take too long.
1733 */
1734 if (rec->opts.full_auxtrace)
1735 rec->buildid_all = true;
1736
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03001737 if (record_opts__config(&rec->opts)) {
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001738 err = -EINVAL;
Adrian Hunter394c01e2016-09-23 17:38:36 +03001739 goto out;
Mike Galbraith7e4ff9e2009-10-12 07:56:03 +02001740 }
1741
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001742 err = __cmd_record(&record, argc, argv);
Adrian Hunter394c01e2016-09-23 17:38:36 +03001743out:
Namhyung Kim45604712014-05-12 09:47:24 +09001744 perf_evlist__delete(rec->evlist);
Arnaldo Carvalho de Melod65a4582010-07-30 18:31:28 -03001745 symbol__exit();
Adrian Hunteref149c22015-04-09 18:53:45 +03001746 auxtrace_record__free(rec->itr);
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001747 return err;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001748}
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001749
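/*
 * Handler for SIGUSR2 (see --switch-output above): take an AUX area
 * snapshot and/or request an output file switch, depending on which
 * triggers are armed.
 */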
1750static void snapshot_sig_handler(int sig __maybe_unused)
1751{
Wang Nan5f9cf592016-04-20 18:59:49 +00001752 if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
1753 trigger_hit(&auxtrace_snapshot_trigger);
1754 auxtrace_record__snapshot_started = 1;
1755 if (auxtrace_record__snapshot_start(record.itr))
1756 trigger_error(&auxtrace_snapshot_trigger);
1757 }
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001758
1759 if (trigger_is_ready(&switch_output_trigger))
1760 trigger_hit(&switch_output_trigger);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001761}