// SPDX-License-Identifier: GPL-2.0
/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "util/build-id.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/config.h"

#include "util/callchain.h"
#include "util/cgroup.h"
#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/target.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/record.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"
#include "util/perf_regs.h"
#include "util/auxtrace.h"
#include "util/tsc.h"
#include "util/parse-branch-options.h"
#include "util/parse-regs-options.h"
#include "util/llvm-utils.h"
#include "util/bpf-loader.h"
#include "util/trigger.h"
#include "util/perf-hooks.h"
#include "util/cpu-set-sched.h"
#include "util/time-utils.h"
#include "util/units.h"
#include "util/bpf-event.h"
#include "asm/bug.h"

#include <errno.h>
#include <inttypes.h>
#include <locale.h>
#include <poll.h>
#include <unistd.h>
#include <sched.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <linux/time64.h>
#include <linux/zalloc.h>

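/*
 * State for the --switch-output option: the output file can be rotated
 * on a signal (SIGUSR2), after a given number of bytes, or after a given
 * time, optionally cycling through a fixed set of num_files filenames.
 */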
struct switch_output {
	bool		 enabled;
	bool		 signal;
	unsigned long	 size;
	unsigned long	 time;
	const char	*str;
	bool		 set;
	char		 **filenames;
	int		 num_files;
	int		 cur_file;
};

struct record {
	struct perf_tool	tool;
	struct record_opts	opts;
	u64			bytes_written;
	struct perf_data	data;
	struct auxtrace_record	*itr;
	struct evlist		*evlist;
	struct perf_session	*session;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_set;
	bool			no_buildid_cache;
	bool			no_buildid_cache_set;
	bool			buildid_all;
	bool			timestamp_filename;
	bool			timestamp_boundary;
	struct switch_output	switch_output;
	unsigned long long	samples;
	cpu_set_t		affinity_mask;
};

static volatile int auxtrace_record__snapshot_started;
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
static DEFINE_TRIGGER(switch_output_trigger);

static const char *affinity_tags[PERF_AFFINITY_MAX] = {
	"SYS", "NODE", "CPU"
};

static bool switch_output_signal(struct record *rec)
{
	return rec->switch_output.signal &&
	       trigger_is_ready(&switch_output_trigger);
}

static bool switch_output_size(struct record *rec)
{
	return rec->switch_output.size &&
	       trigger_is_ready(&switch_output_trigger) &&
	       (rec->bytes_written >= rec->switch_output.size);
}

static bool switch_output_time(struct record *rec)
{
	return rec->switch_output.time &&
	       trigger_is_ready(&switch_output_trigger);
}

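/*
 * Synchronous write path: append a chunk of trace data to the output
 * file, account for the bytes written and fire the switch-output
 * trigger once the configured size threshold is crossed.
 */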
static int record__write(struct record *rec, struct perf_mmap *map __maybe_unused,
			 void *bf, size_t size)
{
	struct perf_data_file *file = &rec->session->data->file;

	if (perf_data_file__write(file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	rec->bytes_written += size;

	if (switch_output_size(rec))
		trigger_hit(&switch_output_trigger);

	return 0;
}

static int record__aio_enabled(struct record *rec);
static int record__comp_enabled(struct record *rec);
static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
			    void *src, size_t src_size);

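/*
 * Asynchronous trace writing: data drained from a mmap'ed ring buffer is
 * staged into per-mmap aio buffers and queued with aio_write(), so that
 * emptying the kernel buffers does not block on file I/O.
 */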
#ifdef HAVE_AIO_SUPPORT
static int record__aio_write(struct aiocb *cblock, int trace_fd,
			     void *buf, size_t size, off_t off)
{
	int rc;

	cblock->aio_fildes = trace_fd;
	cblock->aio_buf    = buf;
	cblock->aio_nbytes = size;
	cblock->aio_offset = off;
	cblock->aio_sigevent.sigev_notify = SIGEV_NONE;

	do {
		rc = aio_write(cblock);
		if (rc == 0) {
			break;
		} else if (errno != EAGAIN) {
			cblock->aio_fildes = -1;
			pr_err("failed to queue perf data, error: %m\n");
			break;
		}
	} while (1);

	return rc;
}

static int record__aio_complete(struct perf_mmap *md, struct aiocb *cblock)
{
	void *rem_buf;
	off_t rem_off;
	size_t rem_size;
	int rc, aio_errno;
	ssize_t aio_ret, written;

	aio_errno = aio_error(cblock);
	if (aio_errno == EINPROGRESS)
		return 0;

	written = aio_ret = aio_return(cblock);
	if (aio_ret < 0) {
		if (aio_errno != EINTR)
			pr_err("failed to write perf data, error: %m\n");
		written = 0;
	}

	rem_size = cblock->aio_nbytes - written;

	if (rem_size == 0) {
		cblock->aio_fildes = -1;
		/*
		 * md->refcount is incremented in record__aio_pushfn() for
		 * every aio write request started in record__aio_push() so
		 * decrement it because the request is now complete.
		 */
		perf_mmap__put(md);
		rc = 1;
	} else {
		/*
		 * An aio write request may need to be restarted with the
		 * remainder if the kernel didn't write the whole chunk
		 * at once.
		 */
		rem_off = cblock->aio_offset + written;
		rem_buf = (void *)(cblock->aio_buf + written);
		record__aio_write(cblock, cblock->aio_fildes,
				  rem_buf, rem_size, rem_off);
		rc = 0;
	}

	return rc;
}

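/*
 * Reap completed aio requests for one mmap: with sync_all, wait until
 * every in-flight request has finished; otherwise block until some
 * control block becomes free and return its index.
 */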
static int record__aio_sync(struct perf_mmap *md, bool sync_all)
{
	struct aiocb **aiocb = md->aio.aiocb;
	struct aiocb *cblocks = md->aio.cblocks;
	struct timespec timeout = { 0, 1000 * 1000 * 1 }; /* 1ms */
	int i, do_suspend;

	do {
		do_suspend = 0;
		for (i = 0; i < md->aio.nr_cblocks; ++i) {
			if (cblocks[i].aio_fildes == -1 || record__aio_complete(md, &cblocks[i])) {
				if (sync_all)
					aiocb[i] = NULL;
				else
					return i;
			} else {
				/*
				 * A started aio write is not complete yet,
				 * so it has to be waited for before the
				 * next allocation.
				 */
				aiocb[i] = &cblocks[i];
				do_suspend = 1;
			}
		}
		if (!do_suspend)
			return -1;

		while (aio_suspend((const struct aiocb **)aiocb, md->aio.nr_cblocks, &timeout)) {
			if (!(errno == EAGAIN || errno == EINTR))
				pr_err("failed to sync perf data, error: %m\n");
		}
	} while (1);
}

struct record_aio {
	struct record	*rec;
	void		*data;
	size_t		size;
};

static int record__aio_pushfn(struct perf_mmap *map, void *to, void *buf, size_t size)
{
	struct record_aio *aio = to;

	/*
	 * map->base data pointed to by buf is copied into a free map->aio.data[]
	 * buffer to release space in the kernel buffer as fast as possible,
	 * calling perf_mmap__consume() from the perf_mmap__push() function.
	 *
	 * That lets the kernel proceed with storing more profiling data into
	 * the kernel buffer earlier than other per-cpu kernel buffers are handled.
	 *
	 * Copying can be done in two steps in case the chunk of profiling data
	 * crosses the upper bound of the kernel buffer. In this case we first move
	 * part of the data from map->start till the upper bound and then the
	 * remainder from the beginning of the kernel buffer till the end of
	 * the data chunk.
	 */

	if (record__comp_enabled(aio->rec)) {
		size = zstd_compress(aio->rec->session, aio->data + aio->size,
				     perf_mmap__mmap_len(map) - aio->size,
				     buf, size);
	} else {
		memcpy(aio->data + aio->size, buf, size);
	}

	if (!aio->size) {
		/*
		 * Increment map->refcount to guard the map->aio.data[] buffer
		 * from premature deallocation, because the map object can be
		 * released earlier than the aio write request started on
		 * the map->aio.data[] buffer is complete.
		 *
		 * perf_mmap__put() is done at record__aio_complete()
		 * after started aio request completion or at record__aio_push()
		 * if the request failed to start.
		 */
		perf_mmap__get(map);
	}

	aio->size += size;

	return size;
}

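/*
 * Queue the current contents of one ring buffer for asynchronous
 * writing: grab a free aio buffer, fill it via record__aio_pushfn()
 * and start an aio write at file offset *off.
 */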
static int record__aio_push(struct record *rec, struct perf_mmap *map, off_t *off)
{
	int ret, idx;
	int trace_fd = rec->session->data->file.fd;
	struct record_aio aio = { .rec = rec, .size = 0 };

	/*
	 * Call record__aio_sync() to wait till map->aio.data[] buffer
	 * becomes available after previous aio write operation.
	 */

	idx = record__aio_sync(map, false);
	aio.data = map->aio.data[idx];
	ret = perf_mmap__push(map, &aio, record__aio_pushfn);
	if (ret != 0) /* ret > 0 - no data, ret < 0 - error */
		return ret;

	rec->samples++;
	ret = record__aio_write(&(map->aio.cblocks[idx]), trace_fd, aio.data, aio.size, *off);
	if (!ret) {
		*off += aio.size;
		rec->bytes_written += aio.size;
		if (switch_output_size(rec))
			trigger_hit(&switch_output_trigger);
	} else {
		/*
		 * Decrement map->refcount incremented in record__aio_pushfn()
		 * back if record__aio_write() operation failed to start, otherwise
		 * map->refcount is decremented in record__aio_complete() after
		 * aio write operation finishes successfully.
		 */
		perf_mmap__put(map);
	}

	return ret;
}

static off_t record__aio_get_pos(int trace_fd)
{
	return lseek(trace_fd, 0, SEEK_CUR);
}

static void record__aio_set_pos(int trace_fd, off_t pos)
{
	lseek(trace_fd, pos, SEEK_SET);
}

static void record__aio_mmap_read_sync(struct record *rec)
{
	int i;
	struct evlist *evlist = rec->evlist;
	struct perf_mmap *maps = evlist->mmap;

	if (!record__aio_enabled(rec))
		return;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		struct perf_mmap *map = &maps[i];

		if (map->base)
			record__aio_sync(map, true);
	}
}

static int nr_cblocks_default = 1;
static int nr_cblocks_max = 4;

static int record__aio_parse(const struct option *opt,
			     const char *str,
			     int unset)
{
	struct record_opts *opts = (struct record_opts *)opt->value;

	if (unset) {
		opts->nr_cblocks = 0;
	} else {
		if (str)
			opts->nr_cblocks = strtol(str, NULL, 0);
		if (!opts->nr_cblocks)
			opts->nr_cblocks = nr_cblocks_default;
	}

	return 0;
}
#else /* HAVE_AIO_SUPPORT */
static int nr_cblocks_max = 0;

static int record__aio_push(struct record *rec __maybe_unused, struct perf_mmap *map __maybe_unused,
			    off_t *off __maybe_unused)
{
	return -1;
}

static off_t record__aio_get_pos(int trace_fd __maybe_unused)
{
	return -1;
}

static void record__aio_set_pos(int trace_fd __maybe_unused, off_t pos __maybe_unused)
{
}

static void record__aio_mmap_read_sync(struct record *rec __maybe_unused)
{
}
#endif

static int record__aio_enabled(struct record *rec)
{
	return rec->opts.nr_cblocks > 0;
}

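/*
 * Parsing of the minimal ring buffer flush threshold (a number of bytes,
 * optionally with a B/K/M/G suffix): data is accumulated until at least
 * this much is pending, capped at a quarter of the mmap size.
 */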
#define MMAP_FLUSH_DEFAULT 1
static int record__mmap_flush_parse(const struct option *opt,
				    const char *str,
				    int unset)
{
	int flush_max;
	struct record_opts *opts = (struct record_opts *)opt->value;
	static struct parse_tag tags[] = {
			{ .tag  = 'B', .mult = 1       },
			{ .tag  = 'K', .mult = 1 << 10 },
			{ .tag  = 'M', .mult = 1 << 20 },
			{ .tag  = 'G', .mult = 1 << 30 },
			{ .tag  = 0 },
	};

	if (unset)
		return 0;

	if (str) {
		opts->mmap_flush = parse_tag_value(str, tags);
		if (opts->mmap_flush == (int)-1)
			opts->mmap_flush = strtol(str, NULL, 0);
	}

	if (!opts->mmap_flush)
		opts->mmap_flush = MMAP_FLUSH_DEFAULT;

	flush_max = perf_evlist__mmap_size(opts->mmap_pages);
	flush_max /= 4;
	if (opts->mmap_flush > flush_max)
		opts->mmap_flush = flush_max;

	return 0;
}

#ifdef HAVE_ZSTD_SUPPORT
static unsigned int comp_level_default = 1;

static int record__parse_comp_level(const struct option *opt, const char *str, int unset)
{
	struct record_opts *opts = opt->value;

	if (unset) {
		opts->comp_level = 0;
	} else {
		if (str)
			opts->comp_level = strtol(str, NULL, 0);
		if (!opts->comp_level)
			opts->comp_level = comp_level_default;
	}

	return 0;
}
#endif
static unsigned int comp_level_max = 22;

static int record__comp_enabled(struct record *rec)
{
	return rec->opts.comp_level > 0;
}

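/*
 * Callback used for all synthesized (side-band) events: just append
 * the event to the output file.
 */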
static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, NULL, event, event->header.size);
}

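/*
 * perf_mmap__push() callback for the synchronous path: optionally
 * compress the chunk into map->data first, then write it out.
 */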
static int record__pushfn(struct perf_mmap *map, void *to, void *bf, size_t size)
{
	struct record *rec = to;

	if (record__comp_enabled(rec)) {
		size = zstd_compress(rec->session, map->data, perf_mmap__mmap_len(map), bf, size);
		bf   = map->data;
	}

	rec->samples++;
	return record__write(rec, map, bf, size);
}

static volatile int done;
static volatile int signr = -1;
static volatile int child_finished;

static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
}

static void sigsegv_handler(int sig)
{
	perf_hooks__recover();
	sighandler_dump_stack(sig);
}

static void record__sig_exit(void)
{
	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	raise(signr);
}

#ifdef HAVE_AUXTRACE_SUPPORT

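/*
 * Write one AUX area trace event: index it when writing to a regular
 * file, then emit the event header, both data fragments and padding
 * up to an 8-byte boundary.
 */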
static int record__process_auxtrace(struct perf_tool *tool,
				    struct perf_mmap *map,
				    union perf_event *event, void *data1,
				    size_t len1, void *data2, size_t len2)
{
	struct record *rec = container_of(tool, struct record, tool);
	struct perf_data *data = &rec->data;
	size_t padding;
	u8 pad[8] = {0};

	if (!perf_data__is_pipe(data) && !perf_data__is_dir(data)) {
		off_t file_offset;
		int fd = perf_data__fd(data);
		int err;

		file_offset = lseek(fd, 0, SEEK_CUR);
		if (file_offset == -1)
			return -1;
		err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
						     event, file_offset);
		if (err)
			return err;
	}

	/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
	padding = (len1 + len2) & 7;
	if (padding)
		padding = 8 - padding;

	record__write(rec, map, event, event->header.size);
	record__write(rec, map, data1, len1);
	if (len2)
		record__write(rec, map, data2, len2);
	record__write(rec, map, &pad, padding);

	return 0;
}

static int record__auxtrace_mmap_read(struct record *rec,
				      struct perf_mmap *map)
{
	int ret;

	ret = auxtrace_mmap__read(map, rec->itr, &rec->tool,
				  record__process_auxtrace);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_mmap_read_snapshot(struct record *rec,
					       struct perf_mmap *map)
{
	int ret;

	ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool,
					   record__process_auxtrace,
					   rec->opts.auxtrace_snapshot_size);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_read_snapshot_all(struct record *rec)
{
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		struct perf_mmap *map = &rec->evlist->mmap[i];

		if (!map->auxtrace_mmap.base)
			continue;

		if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) {
			rc = -1;
			goto out;
		}
	}
out:
	return rc;
}

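/*
 * Take an AUX area snapshot of every mmap and update the snapshot
 * trigger state; on_exit is passed through to
 * auxtrace_record__snapshot_finish() for the final snapshot taken
 * when recording stops.
 */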
static void record__read_auxtrace_snapshot(struct record *rec, bool on_exit)
{
	pr_debug("Recording AUX area tracing snapshot\n");
	if (record__auxtrace_read_snapshot_all(rec) < 0) {
		trigger_error(&auxtrace_snapshot_trigger);
	} else {
		if (auxtrace_record__snapshot_finish(rec->itr, on_exit))
			trigger_error(&auxtrace_snapshot_trigger);
		else
			trigger_ready(&auxtrace_snapshot_trigger);
	}
}

static int record__auxtrace_snapshot_exit(struct record *rec)
{
	if (trigger_is_error(&auxtrace_snapshot_trigger))
		return 0;

	if (!auxtrace_record__snapshot_started &&
	    auxtrace_record__snapshot_start(rec->itr))
		return -1;

	record__read_auxtrace_snapshot(rec, true);
	if (trigger_is_error(&auxtrace_snapshot_trigger))
		return -1;

	return 0;
}

static int record__auxtrace_init(struct record *rec)
{
	int err;

	if (!rec->itr) {
		rec->itr = auxtrace_record__init(rec->evlist, &err);
		if (err)
			return err;
	}

	err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
					      rec->opts.auxtrace_snapshot_opts);
	if (err)
		return err;

	return auxtrace_parse_filters(rec->evlist);
}

#else

static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
			       struct perf_mmap *map __maybe_unused)
{
	return 0;
}

static inline
void record__read_auxtrace_snapshot(struct record *rec __maybe_unused,
				    bool on_exit __maybe_unused)
{
}

static inline
int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
{
	return 0;
}

static inline
int record__auxtrace_snapshot_exit(struct record *rec __maybe_unused)
{
	return 0;
}

static int record__auxtrace_init(struct record *rec __maybe_unused)
{
	return 0;
}

#endif

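/*
 * mmap the per-cpu ring buffers (and AUX area buffers, if configured),
 * honoring the aio, affinity, flush and compression options, and turn
 * an EPERM into a hint about perf_event_mlock_kb.
 */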
static int record__mmap_evlist(struct record *rec,
			       struct evlist *evlist)
{
	struct record_opts *opts = &rec->opts;
	char msg[512];

	if (opts->affinity != PERF_AFFINITY_SYS)
		cpu__setup_cpunode_map();

	if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
				 opts->auxtrace_mmap_pages,
				 opts->auxtrace_snapshot_mode,
				 opts->nr_cblocks, opts->affinity,
				 opts->mmap_flush, opts->comp_level) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u,%u)\n",
			       opts->mmap_pages, opts->auxtrace_mmap_pages);
			return -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno,
				str_error_r(errno, msg, sizeof(msg)));
			if (errno)
				return -errno;
			else
				return -EINVAL;
		}
	}
	return 0;
}

static int record__mmap(struct record *rec)
{
	return record__mmap_evlist(rec, rec->evlist);
}

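/*
 * Open all events in the evlist, retrying with fallback configurations
 * (e.g. after breaking up a weak group) when perf_event_open() fails,
 * then apply tracing filters and mmap the ring buffers.
 */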
static int record__open(struct record *rec)
{
	char msg[BUFSIZ];
	struct evsel *pos;
	struct evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	int rc = 0;

	/*
	 * For initial_delay we need to add a dummy event so that we can track
	 * PERF_RECORD_MMAP while we wait for the initial delay to enable the
	 * real events, the ones asked for by the user.
	 */
	if (opts->initial_delay) {
		if (perf_evlist__add_dummy(evlist))
			return -ENOMEM;

		pos = perf_evlist__first(evlist);
		pos->tracking = 0;
		pos = perf_evlist__last(evlist);
		pos->tracking = 1;
		pos->core.attr.enable_on_exec = 1;
	}

	perf_evlist__config(evlist, opts, &callchain_param);

	evlist__for_each_entry(evlist, pos) {
try_again:
		if (evsel__open(pos, pos->core.cpus, pos->core.threads) < 0) {
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose > 0)
					ui__warning("%s\n", msg);
				goto try_again;
			}
			if ((errno == EINVAL || errno == EBADF) &&
			    pos->leader != pos &&
			    pos->weak_group) {
				pos = perf_evlist__reset_weak_group(evlist, pos);
				goto try_again;
			}
			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}

		pos->supported = true;
	}

	if (perf_evlist__apply_filters(evlist, &pos)) {
		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
			pos->filter, perf_evsel__name(pos), errno,
			str_error_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	rc = record__mmap(rec);
	if (rc)
		goto out;

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct evsel *evsel,
				struct machine *machine)
{
	struct record *rec = container_of(tool, struct record, tool);

	if (rec->evlist->first_sample_time == 0)
		rec->evlist->first_sample_time = sample->time;

	rec->evlist->last_sample_time = sample->time;

	if (rec->buildid_all)
		return 0;

	rec->samples++;
	return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
}

static int process_buildids(struct record *rec)
{
	struct perf_session *session = rec->session;

	if (perf_data__size(&rec->data) == 0)
		return 0;

	/*
	 * During this process, it'll load the kernel map and replace
	 * dso->long_name with a real pathname it found. In this case
	 * we prefer the vmlinux path like
	 *   /lib/modules/3.16.4/build/vmlinux
	 *
	 * rather than the build-id path (in the debug directory):
	 *   $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
	 */
	symbol_conf.ignore_vmlinux_buildid = true;

	/*
	 * If --buildid-all is given, it marks all DSOs regardless of hits,
	 * so there is no need to process samples. But if timestamp_boundary
	 * is enabled, it still needs to walk all samples to get the
	 * timestamps of the first/last samples.
	 */
	if (rec->buildid_all && !rec->timestamp_boundary)
		rec->tool.sample = NULL;

	return perf_session__process_events(session);
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * As for the guest kernel when processing the subcommands record
	 * and report, we arrange the module mmaps prior to the guest kernel
	 * mmap and trigger a preload dso, because default guest module
	 * symbols are loaded from guest kallsyms instead of
	 * /lib/modules/XXX/XXX. This method is used to avoid missing
	 * symbols when the first addr is in a module instead of in the
	 * guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for the guest kernel because the guest kernel's
	 * /proc/kallsyms sometimes has no _text.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

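/*
 * With an affinity mode other than the default, migrate the recording
 * thread onto the CPU mask of the mmap being drained, so the buffer is
 * read from a nearby CPU.
 */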
static void record__adjust_affinity(struct record *rec, struct perf_mmap *map)
{
	if (rec->opts.affinity != PERF_AFFINITY_SYS &&
	    !CPU_EQUAL(&rec->affinity_mask, &map->affinity_mask)) {
		CPU_ZERO(&rec->affinity_mask);
		CPU_OR(&rec->affinity_mask, &rec->affinity_mask, &map->affinity_mask);
		sched_setaffinity(0, sizeof(rec->affinity_mask), &rec->affinity_mask);
	}
}

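/*
 * Compressed data is framed as PERF_RECORD_COMPRESSED records:
 * process_comp_header() lays out and extends the event header around
 * each chunk that zstd_compress_stream_to_records() produces.
 */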
static size_t process_comp_header(void *record, size_t increment)
{
	struct compressed_event *event = record;
	size_t size = sizeof(*event);

	if (increment) {
		event->header.size += increment;
		return increment;
	}

	event->header.type = PERF_RECORD_COMPRESSED;
	event->header.size = size;

	return size;
}

static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
			    void *src, size_t src_size)
{
	size_t compressed;
	size_t max_record_size = PERF_SAMPLE_MAX_SIZE - sizeof(struct compressed_event) - 1;

	compressed = zstd_compress_stream_to_records(&session->zstd_data, dst, dst_size, src, src_size,
						     max_record_size, process_comp_header);

	session->bytes_transferred += src_size;
	session->bytes_compressed  += compressed;

	return compressed;
}

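/*
 * Drain every ring buffer of the evlist once, through either the
 * synchronous or the aio path, and emit a PERF_RECORD_FINISHED_ROUND
 * event if anything was written. With synch set, map->flush is forced
 * to 1 so that even partially filled buffers are pushed out.
 */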
static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist,
				    bool overwrite, bool synch)
{
	u64 bytes_written = rec->bytes_written;
	int i;
	int rc = 0;
	struct perf_mmap *maps;
	int trace_fd = rec->data.file.fd;
	off_t off = 0;

	if (!evlist)
		return 0;

	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
	if (!maps)
		return 0;

	if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
		return 0;

	if (record__aio_enabled(rec))
		off = record__aio_get_pos(trace_fd);

	for (i = 0; i < evlist->nr_mmaps; i++) {
		u64 flush = 0;
		struct perf_mmap *map = &maps[i];

		if (map->base) {
			record__adjust_affinity(rec, map);
			if (synch) {
				flush = map->flush;
				map->flush = 1;
			}
			if (!record__aio_enabled(rec)) {
				if (perf_mmap__push(map, rec, record__pushfn) < 0) {
					if (synch)
						map->flush = flush;
					rc = -1;
					goto out;
				}
			} else {
				if (record__aio_push(rec, map, &off) < 0) {
					record__aio_set_pos(trace_fd, off);
					if (synch)
						map->flush = flush;
					rc = -1;
					goto out;
				}
			}
			if (synch)
				map->flush = flush;
		}

		if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
		    record__auxtrace_mmap_read(rec, map) != 0) {
			rc = -1;
			goto out;
		}
	}

	if (record__aio_enabled(rec))
		record__aio_set_pos(trace_fd, off);

	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, NULL, &finished_round_event, sizeof(finished_round_event));

	if (overwrite)
		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
out:
	return rc;
}

static int record__mmap_read_all(struct record *rec, bool synch)
{
	int err;

	err = record__mmap_read_evlist(rec, rec->evlist, false, synch);
	if (err)
		return err;

	return record__mmap_read_evlist(rec, rec->evlist, true, synch);
}

static void record__init_features(struct record *rec)
{
	struct perf_session *session = rec->session;
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&rec->evlist->core.entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);

	if (!rec->opts.full_auxtrace)
		perf_header__clear_feat(&session->header, HEADER_AUXTRACE);

	if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
		perf_header__clear_feat(&session->header, HEADER_CLOCKID);

	perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
	if (!record__comp_enabled(rec))
		perf_header__clear_feat(&session->header, HEADER_COMPRESSED);

	perf_header__clear_feat(&session->header, HEADER_STAT);
}

static void
record__finish_output(struct record *rec)
{
	struct perf_data *data = &rec->data;
	int fd = perf_data__fd(data);

	if (data->is_pipe)
		return;

	rec->session->header.data_size += rec->bytes_written;
	data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR);

	if (!rec->no_buildid) {
		process_buildids(rec);

		if (rec->buildid_all)
			dsos__hit_all(rec->session);
	}
	perf_session__write_header(rec->session, rec->evlist, fd, true);

	return;
}

static int record__synthesize_workload(struct record *rec, bool tail)
{
	int err;
	struct perf_thread_map *thread_map;

	if (rec->opts.tail_synthesize != tail)
		return 0;

	thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
	if (thread_map == NULL)
		return -1;

	err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
						process_synthesized_event,
						&rec->session->machines.host,
						rec->opts.sample_address);
	perf_thread_map__put(thread_map);
	return err;
}

static int record__synthesize(struct record *rec, bool tail);

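/*
 * Rotate the output for --switch-output: finish the current file,
 * give it a timestamped name and continue recording into a fresh one,
 * recycling the oldest file when a maximum number of files
 * (switch_output.num_files) is configured.
 */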
static int
record__switch_output(struct record *rec, bool at_exit)
{
	struct perf_data *data = &rec->data;
	int fd, err;
	char *new_filename;

	/* Same size: "2015122520103046" */
	char timestamp[] = "InvalidTimestamp";

	record__aio_mmap_read_sync(rec);

	record__synthesize(rec, true);
	if (target__none(&rec->opts.target))
		record__synthesize_workload(rec, true);

	rec->samples = 0;
	record__finish_output(rec);
	err = fetch_current_timestamp(timestamp, sizeof(timestamp));
	if (err) {
		pr_err("Failed to get current timestamp\n");
		return -EINVAL;
	}

	fd = perf_data__switch(data, timestamp,
			       rec->session->header.data_offset,
			       at_exit, &new_filename);
	if (fd >= 0 && !at_exit) {
		rec->bytes_written = 0;
		rec->session->header.data_size = 0;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
			data->path, timestamp);

	if (rec->switch_output.num_files) {
		int n = rec->switch_output.cur_file + 1;

		if (n >= rec->switch_output.num_files)
			n = 0;
		rec->switch_output.cur_file = n;
		if (rec->switch_output.filenames[n]) {
			remove(rec->switch_output.filenames[n]);
			zfree(&rec->switch_output.filenames[n]);
		}
		rec->switch_output.filenames[n] = new_filename;
	} else {
		free(new_filename);
	}

	/* Output tracking events */
	if (!at_exit) {
		record__synthesize(rec, false);

		/*
		 * In 'perf record --switch-output' without -a,
		 * record__synthesize() in record__switch_output() won't
		 * generate tracking events because there's no thread_map
		 * in the evlist, which causes the newly created perf.data
		 * to not contain map and comm information.
		 * Create a fake thread_map and directly call
		 * perf_event__synthesize_thread_map() for those events.
		 */
		if (target__none(&rec->opts.target))
			record__synthesize_workload(rec, false);
	}
	return fd;
}

static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked for it by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}

static void snapshot_sig_handler(int sig);
static void alarm_sig_handler(int sig);

int __weak
perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
			    struct perf_tool *tool __maybe_unused,
			    perf_event__handler_t process __maybe_unused,
			    struct machine *machine __maybe_unused)
{
	return 0;
}

static const struct perf_event_mmap_page *
perf_evlist__pick_pc(struct evlist *evlist)
{
	if (evlist) {
		if (evlist->mmap && evlist->mmap[0].base)
			return evlist->mmap[0].base;
		if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].base)
			return evlist->overwrite_mmap[0].base;
	}
	return NULL;
}

static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
{
	const struct perf_event_mmap_page *pc;

	pc = perf_evlist__pick_pc(rec->evlist);
	if (pc)
		return pc;
	return NULL;
}

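/*
 * Synthesize the side-band records describing pre-existing state:
 * attrs, features and tracing data for pipe output, the time
 * conversion record, kernel and module maps, thread and cpu maps,
 * bpf events and already running threads. Called at startup and,
 * for tail synthesis, again right before finishing.
 */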
static int record__synthesize(struct record *rec, bool tail)
{
	struct perf_session *session = rec->session;
	struct machine *machine = &session->machines.host;
	struct perf_data *data = &rec->data;
	struct record_opts *opts = &rec->opts;
	struct perf_tool *tool = &rec->tool;
	int fd = perf_data__fd(data);
	int err = 0;

	if (rec->opts.tail_synthesize != tail)
		return 0;

	if (data->is_pipe) {
		/*
		 * We need to synthesize events first, because some
		 * features work on top of them (on the report side).
		 */
		err = perf_event__synthesize_attrs(tool, rec->evlist,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out;
		}

		err = perf_event__synthesize_features(tool, session, rec->evlist,
						      process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize features.\n");
			return err;
		}

		if (have_tracepoints(&rec->evlist->core.entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so it's not really
			 * an error, just that we don't need to
			 * synthesize anything. We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out;
			}
			rec->bytes_written += err;
		}
	}

	err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
					  process_synthesized_event, machine);
	if (err)
		goto out;

	if (rec->opts.full_auxtrace) {
		err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
					session, process_synthesized_event);
		if (err)
			goto out;
	}

	if (!perf_evlist__exclude_kernel(rec->evlist)) {
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine);
		WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
				   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
				   "Check /proc/kallsyms permission or run as root.\n");

		err = perf_event__synthesize_modules(tool, process_synthesized_event,
						     machine);
		WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
				   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
				   "Check /proc/modules permission or run as root.\n");
	}

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	err = perf_event__synthesize_extra_attr(&rec->tool,
						rec->evlist,
						process_synthesized_event,
						data->is_pipe);
	if (err)
		goto out;

	err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->core.threads,
						 process_synthesized_event,
						 NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize thread map.\n");
		return err;
	}

	err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.cpus,
					     process_synthesized_event, NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize cpu map.\n");
		return err;
	}

	err = perf_event__synthesize_bpf_events(session, process_synthesized_event,
						machine, opts);
	if (err < 0)
		pr_warning("Couldn't synthesize bpf events.\n");

	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->core.threads,
					    process_synthesized_event, opts->sample_address,
					    1);
out:
	return err;
}

Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001328static int __cmd_record(struct record *rec, int argc, const char **argv)
Peter Zijlstra16c8a102009-05-05 17:50:27 +02001329{
David Ahern57706ab2013-11-06 11:41:34 -07001330 int err;
Namhyung Kim45604712014-05-12 09:47:24 +09001331 int status = 0;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001332 unsigned long waking = 0;
Zhang, Yanmin46be6042010-03-18 11:36:04 -03001333 const bool forks = argc > 0;
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02001334 struct perf_tool *tool = &rec->tool;
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03001335 struct record_opts *opts = &rec->opts;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001336 struct perf_data *data = &rec->data;
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001337 struct perf_session *session;
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001338 bool disabled = false, draining = false;
Jiri Olsa63503db2019-07-21 13:23:52 +02001339 struct evlist *sb_evlist = NULL;
Namhyung Kim42aa2762015-01-29 17:06:48 +09001340 int fd;
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001341 float ratio = 0;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001342
Namhyung Kim45604712014-05-12 09:47:24 +09001343 atexit(record__sig_exit);
Peter Zijlstraf5970552009-06-18 23:22:55 +02001344 signal(SIGCHLD, sig_handler);
1345 signal(SIGINT, sig_handler);
David Ahern804f7ac2013-05-06 12:24:23 -06001346 signal(SIGTERM, sig_handler);
Wang Nana0748652016-11-26 07:03:28 +00001347 signal(SIGSEGV, sigsegv_handler);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00001348
Hari Bathinif3b36142017-03-08 02:11:43 +05301349 if (rec->opts.record_namespaces)
1350 tool->namespace_events = true;
1351
Jiri Olsadc0c6122017-01-09 10:51:58 +01001352 if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001353 signal(SIGUSR2, snapshot_sig_handler);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001354 if (rec->opts.auxtrace_snapshot_mode)
1355 trigger_on(&auxtrace_snapshot_trigger);
Jiri Olsadc0c6122017-01-09 10:51:58 +01001356 if (rec->switch_output.enabled)
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001357 trigger_on(&switch_output_trigger);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00001358 } else {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001359 signal(SIGUSR2, SIG_IGN);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00001360 }
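	/*
	 * Illustrative note (hypothetical usage, not from the original
	 * sources): with either trigger armed, an external
	 *
	 *   kill -USR2 $(pidof perf)
	 *
	 * fires the corresponding handler, e.g. 'perf record -S -e
	 * intel_pt// -a' plus SIGUSR2 takes one AUX area snapshot.
	 */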
Peter Zijlstraf5970552009-06-18 23:22:55 +02001361
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001362 session = perf_session__new(data, false, tool);
Arnaldo Carvalho de Melo94c744b2009-12-11 21:24:02 -02001363 if (session == NULL) {
Adrien BAKffa91882014-04-18 11:00:43 +09001364 pr_err("Perf session creation failed.\n");
Arnaldo Carvalho de Meloa9a70bb2009-11-17 01:18:11 -02001365 return -1;
1366 }
1367
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001368 fd = perf_data__fd(data);
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001369 rec->session = session;
1370
Alexey Budankov5d7f4112019-03-18 20:43:35 +03001371 if (zstd_init(&session->zstd_data, rec->opts.comp_level) < 0) {
1372 pr_err("Compression initialization failed.\n");
1373 return -1;
1374 }
1375
1376 session->header.env.comp_type = PERF_COMP_ZSTD;
1377 session->header.env.comp_level = rec->opts.comp_level;
1378
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001379 record__init_features(rec);
Stephane Eranian330aa672012-03-08 23:47:46 +01001380
Alexey Budankovcf790512018-10-09 17:36:24 +03001381 if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
1382 session->header.env.clockid_res_ns = rec->opts.clockid_res_ns;
1383
Arnaldo Carvalho de Melod4db3f12009-12-27 21:36:57 -02001384 if (forks) {
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001385 err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001386 argv, data->is_pipe,
Arnaldo Carvalho de Melo735f7e02014-01-03 14:56:49 -03001387 workload_exec_failed_signal);
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001388 if (err < 0) {
1389 pr_err("Couldn't run the workload!\n");
Namhyung Kim45604712014-05-12 09:47:24 +09001390 status = err;
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001391 goto out_delete_session;
Jens Axboe0a5ac842009-08-12 11:18:01 +02001392 }
Peter Zijlstra856e9662009-12-16 17:55:55 +01001393 }
1394
Jiri Olsaad46e48c2018-03-02 17:13:54 +01001395 /*
1396 * If we have just a single event and are sending data
1397 * through a pipe, we need to force ID allocation,
1398 * because we synthesize the event name through the
1399 * pipe and need the ID for that.
1400 */
Jiri Olsa6484d2f2019-07-21 13:24:28 +02001401 if (data->is_pipe && rec->evlist->core.nr_entries == 1)
Jiri Olsaad46e48c2018-03-02 17:13:54 +01001402 rec->opts.sample_id = true;
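	/*
	 * Illustrative example (hypothetical command line): something
	 * like 'perf record -e cycles -o - -- true | perf report -i -'
	 * takes this path, so the lone event gets an ID for the pipe.
	 */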
1403
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001404 if (record__open(rec) != 0) {
David Ahern8d3eca22012-08-26 12:24:47 -06001405 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001406 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001407 }
Alexey Budankov42e1fd82019-03-18 20:41:33 +03001408 session->header.env.comp_mmap_len = session->evlist->mmap_len;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001409
Wang Nan8690a2a2016-02-22 09:10:32 +00001410 err = bpf__apply_obj_config();
1411 if (err) {
1412 char errbuf[BUFSIZ];
1413
1414 bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
1415 pr_err("ERROR: Apply config to BPF failed: %s\n",
1416 errbuf);
1417 goto out_child;
1418 }
1419
Adrian Huntercca84822015-08-19 17:29:21 +03001420 /*
1421 * Normally perf_session__new would do this, but it doesn't have the
1422 * evlist.
1423 */
1424 if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
1425 pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
1426 rec->tool.ordered_events = false;
1427 }
1428
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001429 if (!rec->evlist->nr_groups)
Namhyung Kima8bb5592013-01-22 18:09:31 +09001430 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
1431
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001432 if (data->is_pipe) {
Namhyung Kim42aa2762015-01-29 17:06:48 +09001433 err = perf_header__write_pipe(fd);
Tom Zanussi529870e2010-04-01 23:59:16 -05001434 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001435 goto out_child;
Jiri Olsa563aecb2013-06-05 13:35:06 +02001436 } else {
Namhyung Kim42aa2762015-01-29 17:06:48 +09001437 err = perf_session__write_header(session, rec->evlist, fd, false);
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02001438 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001439 goto out_child;
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02001440 }
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02001441
David Ahernd3665492012-02-06 15:27:52 -07001442 if (!rec->no_buildid
Robert Richtere20960c2011-12-07 10:02:55 +01001443 && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
David Ahernd3665492012-02-06 15:27:52 -07001444 pr_err("Couldn't generate buildids. "
Robert Richtere20960c2011-12-07 10:02:55 +01001445 "Use --no-buildid to profile anyway.\n");
David Ahern8d3eca22012-08-26 12:24:47 -06001446 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001447 goto out_child;
Robert Richtere20960c2011-12-07 10:02:55 +01001448 }
1449
Song Liud56354d2019-03-11 22:30:51 -07001450 if (!opts->no_bpf_event)
1451 bpf_event__add_sb_event(&sb_evlist, &session->header.env);
1452
Song Liu657ee552019-03-11 22:30:50 -07001453 if (perf_evlist__start_sb_thread(sb_evlist, &rec->opts.target)) {
1454 pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
1455 opts->no_bpf_event = true;
1456 }
1457
Wang Nan4ea648a2016-07-14 08:34:47 +00001458 err = record__synthesize(rec, false);
Wang Nanc45c86e2016-02-26 09:32:07 +00001459 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001460 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001461
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001462 if (rec->realtime_prio) {
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001463 struct sched_param param;
1464
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001465 param.sched_priority = rec->realtime_prio;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001466 if (sched_setscheduler(0, SCHED_FIFO, &param)) {
Arnaldo Carvalho de Melo6beba7a2009-10-21 17:34:06 -02001467 pr_err("Could not set realtime priority.\n");
David Ahern8d3eca22012-08-26 12:24:47 -06001468 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001469 goto out_child;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001470 }
1471 }
1472
Jiri Olsa774cb492012-11-12 18:34:01 +01001473 /*
1474 * When perf is starting the traced process, all the events
1475 * (apart from group members) have enable_on_exec=1 set,
1476 * so don't spoil it by prematurely enabling them.
1477 */
Andi Kleen6619a532014-01-11 13:38:27 -08001478 if (!target__none(&opts->target) && !opts->initial_delay)
Jiri Olsa1c87f162019-07-21 13:24:08 +02001479 evlist__enable(rec->evlist);
David Ahern764e16a32011-08-25 10:17:55 -06001480
Peter Zijlstra856e9662009-12-16 17:55:55 +01001481 /*
1482 * Let the child rip
1483 */
Namhyung Kime803cf92015-09-22 09:24:55 +09001484 if (forks) {
Jiri Olsa20a8a3c2018-03-07 16:50:04 +01001485 struct machine *machine = &session->machines.host;
Namhyung Kime5bed562015-09-30 10:45:24 +09001486 union perf_event *event;
Hari Bathinie907caf2017-03-08 02:11:51 +05301487 pid_t tgid;
Namhyung Kime5bed562015-09-30 10:45:24 +09001488
1489 event = malloc(sizeof(event->comm) + machine->id_hdr_size);
1490 if (event == NULL) {
1491 err = -ENOMEM;
1492 goto out_child;
1493 }
1494
Namhyung Kime803cf92015-09-22 09:24:55 +09001495 /*
1496 * Some H/W events are generated before the COMM event,
1497 * which is emitted during exec(), so perf script
1498 * cannot see a correct process name for those events.
1499 * Synthesize a COMM event to prevent that.
1500 */
Hari Bathinie907caf2017-03-08 02:11:51 +05301501 tgid = perf_event__synthesize_comm(tool, event,
1502 rec->evlist->workload.pid,
1503 process_synthesized_event,
1504 machine);
1505 free(event);
1506
1507 if (tgid == -1)
1508 goto out_child;
1509
1510 event = malloc(sizeof(event->namespaces) +
1511 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
1512 machine->id_hdr_size);
1513 if (event == NULL) {
1514 err = -ENOMEM;
1515 goto out_child;
1516 }
1517
1518 /*
1519 * Synthesize NAMESPACES event for the command specified.
1520 */
1521 perf_event__synthesize_namespaces(tool, event,
1522 rec->evlist->workload.pid,
1523 tgid, process_synthesized_event,
1524 machine);
Namhyung Kime5bed562015-09-30 10:45:24 +09001525 free(event);
Namhyung Kime803cf92015-09-22 09:24:55 +09001526
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001527 perf_evlist__start_workload(rec->evlist);
Namhyung Kime803cf92015-09-22 09:24:55 +09001528 }
Peter Zijlstra856e9662009-12-16 17:55:55 +01001529
Andi Kleen6619a532014-01-11 13:38:27 -08001530 if (opts->initial_delay) {
Arnaldo Carvalho de Melo0693e682016-08-08 15:05:46 -03001531 usleep(opts->initial_delay * USEC_PER_MSEC);
Jiri Olsa1c87f162019-07-21 13:24:08 +02001532 evlist__enable(rec->evlist);
Andi Kleen6619a532014-01-11 13:38:27 -08001533 }
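	/*
	 * Illustrative example: with '-D 500' (hypothetical value) the
	 * workload above runs for ~500ms before the events are enabled,
	 * skipping startup noise in the profile.
	 */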
1534
Wang Nan5f9cf592016-04-20 18:59:49 +00001535 trigger_ready(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001536 trigger_ready(&switch_output_trigger);
Wang Nana0748652016-11-26 07:03:28 +00001537 perf_hooks__invoke_record_start();
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001538 for (;;) {
Yang Shi9f065192015-09-29 14:49:43 -07001539 unsigned long long hits = rec->samples;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001540
Wang Nan057374642016-07-14 08:34:43 +00001541 /*
1542 * rec->evlist->bkw_mmap_state can be BKW_MMAP_EMPTY
1543 * here: when done == true and hits != rec->samples in
1544 * the previous round.
1545 *
1546 * perf_evlist__toggle_bkw_mmap() ensures we never
1547 * convert BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
1548 */
1549 if (trigger_is_hit(&switch_output_trigger) || done || draining)
1550 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);
1551
Alexey Budankov470530b2019-03-18 20:40:26 +03001552 if (record__mmap_read_all(rec, false) < 0) {
Wang Nan5f9cf592016-04-20 18:59:49 +00001553 trigger_error(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001554 trigger_error(&switch_output_trigger);
David Ahern8d3eca22012-08-26 12:24:47 -06001555 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001556 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001557 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001558
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001559 if (auxtrace_record__snapshot_started) {
1560 auxtrace_record__snapshot_started = 0;
Wang Nan5f9cf592016-04-20 18:59:49 +00001561 if (!trigger_is_error(&auxtrace_snapshot_trigger))
Alexander Shishkince7b0e42019-08-06 17:41:01 +03001562 record__read_auxtrace_snapshot(rec, false);
Wang Nan5f9cf592016-04-20 18:59:49 +00001563 if (trigger_is_error(&auxtrace_snapshot_trigger)) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001564 pr_err("AUX area tracing snapshot failed\n");
1565 err = -1;
1566 goto out_child;
1567 }
1568 }
1569
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001570 if (trigger_is_hit(&switch_output_trigger)) {
Wang Nan057374642016-07-14 08:34:43 +00001571 /*
1572 * If switch_output_trigger is hit, the data in the
1573 * overwritable ring buffer should have been collected,
1574 * so bkw_mmap_state should be set to BKW_MMAP_EMPTY.
1575 *
1576 * If SIGUSR2 is raised after or during record__mmap_read_all(),
1577 * record__mmap_read_all() didn't collect data from the
1578 * overwritable ring buffer. Read again.
1579 */
1580 if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
1581 continue;
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001582 trigger_ready(&switch_output_trigger);
1583
Wang Nan057374642016-07-14 08:34:43 +00001584 /*
1585 * Re-enable events in the overwrite ring buffer after
1586 * record__mmap_read_all(): we should have collected
1587 * data from it.
1588 */
1589 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);
1590
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001591 if (!quiet)
1592 fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
1593 waking);
1594 waking = 0;
1595 fd = record__switch_output(rec, false);
1596 if (fd < 0) {
1597 pr_err("Failed to switch to new file\n");
1598 trigger_error(&switch_output_trigger);
1599 err = fd;
1600 goto out_child;
1601 }
Jiri Olsabfacbe32017-01-09 10:52:00 +01001602
1603 /* re-arm the alarm */
1604 if (rec->switch_output.time)
1605 alarm(rec->switch_output.time);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001606 }
1607
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001608 if (hits == rec->samples) {
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001609 if (done || draining)
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001610 break;
Arnaldo Carvalho de Melof66a8892014-08-18 17:25:59 -03001611 err = perf_evlist__poll(rec->evlist, -1);
Jiri Olsaa5151142014-06-02 13:44:23 -04001612 /*
1613 * Propagate the error only if there is one. Ignore a positive
1614 * number of returned events and interrupt errors.
1615 */
1616 if (err > 0 || (err < 0 && errno == EINTR))
Namhyung Kim45604712014-05-12 09:47:24 +09001617 err = 0;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001618 waking++;
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001619
1620 if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
1621 draining = true;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001622 }
1623
Jiri Olsa774cb492012-11-12 18:34:01 +01001624 /*
1625 * When perf is starting the traced process, the events die
1626 * with the process at the end and we wait for that. Thus there
1627 * is no need to disable events in this case.
1628 */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001629 if (done && !disabled && !target__none(&opts->target)) {
Wang Nan5f9cf592016-04-20 18:59:49 +00001630 trigger_off(&auxtrace_snapshot_trigger);
Jiri Olsae74676d2019-07-21 13:24:09 +02001631 evlist__disable(rec->evlist);
Jiri Olsa27119262012-11-12 18:34:02 +01001632 disabled = true;
1633 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001634 }
Alexander Shishkince7b0e42019-08-06 17:41:01 +03001635
Wang Nan5f9cf592016-04-20 18:59:49 +00001636 trigger_off(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001637 trigger_off(&switch_output_trigger);
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001638
Alexander Shishkince7b0e42019-08-06 17:41:01 +03001639 if (opts->auxtrace_snapshot_on_exit)
1640 record__auxtrace_snapshot_exit(rec);
1641
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001642 if (forks && workload_exec_errno) {
Masami Hiramatsu35550da2014-08-14 02:22:43 +00001643 char msg[STRERR_BUFSIZE];
Arnaldo Carvalho de Meloc8b5f2c2016-07-06 11:56:20 -03001644 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001645 pr_err("Workload failed: %s\n", emsg);
1646 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001647 goto out_child;
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001648 }
1649
Namhyung Kime3d59112015-01-29 17:06:44 +09001650 if (!quiet)
Namhyung Kim45604712014-05-12 09:47:24 +09001651 fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02001652
Wang Nan4ea648a2016-07-14 08:34:47 +00001653 if (target__none(&rec->opts.target))
1654 record__synthesize_workload(rec, true);
1655
Namhyung Kim45604712014-05-12 09:47:24 +09001656out_child:
Alexey Budankov470530b2019-03-18 20:40:26 +03001657 record__mmap_read_all(rec, true);
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001658 record__aio_mmap_read_sync(rec);
1659
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001660 if (rec->session->bytes_transferred && rec->session->bytes_compressed) {
1661 ratio = (float)rec->session->bytes_transferred/(float)rec->session->bytes_compressed;
1662 session->header.env.comp_ratio = ratio + 0.5;
1663 }
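	/*
	 * Illustrative arithmetic: 10 MB transferred and 4 MB written
	 * compressed give ratio = 2.5, stored as 3 in the header via
	 * the +0.5 round-on-truncation above.
	 */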
1664
Namhyung Kim45604712014-05-12 09:47:24 +09001665 if (forks) {
1666 int exit_status;
Ingo Molnaraddc2782009-06-02 23:43:11 +02001667
Namhyung Kim45604712014-05-12 09:47:24 +09001668 if (!child_finished)
1669 kill(rec->evlist->workload.pid, SIGTERM);
1670
1671 wait(&exit_status);
1672
1673 if (err < 0)
1674 status = err;
1675 else if (WIFEXITED(exit_status))
1676 status = WEXITSTATUS(exit_status);
1677 else if (WIFSIGNALED(exit_status))
1678 signr = WTERMSIG(exit_status);
1679 } else
1680 status = err;
1681
Wang Nan4ea648a2016-07-14 08:34:47 +00001682 record__synthesize(rec, true);
Namhyung Kime3d59112015-01-29 17:06:44 +09001683 /* this will be recalculated during process_buildids() */
1684 rec->samples = 0;
1685
Wang Nanecfd7a92016-04-13 08:21:07 +00001686 if (!err) {
1687 if (!rec->timestamp_filename) {
1688 record__finish_output(rec);
1689 } else {
1690 fd = record__switch_output(rec, true);
1691 if (fd < 0) {
1692 status = fd;
1693 goto out_delete_session;
1694 }
1695 }
1696 }
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001697
Wang Nana0748652016-11-26 07:03:28 +00001698 perf_hooks__invoke_record_end();
1699
Namhyung Kime3d59112015-01-29 17:06:44 +09001700 if (!err && !quiet) {
1701 char samples[128];
Wang Nanecfd7a92016-04-13 08:21:07 +00001702 const char *postfix = rec->timestamp_filename ?
1703 ".<timestamp>" : "";
Namhyung Kime3d59112015-01-29 17:06:44 +09001704
Adrian Hunteref149c22015-04-09 18:53:45 +03001705 if (rec->samples && !rec->opts.full_auxtrace)
Namhyung Kime3d59112015-01-29 17:06:44 +09001706 scnprintf(samples, sizeof(samples),
1707 " (%" PRIu64 " samples)", rec->samples);
1708 else
1709 samples[0] = '\0';
1710
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001711 fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s",
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001712 perf_data__size(data) / 1024.0 / 1024.0,
Jiri Olsa2d4f2792019-02-21 10:41:30 +01001713 data->path, postfix, samples);
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001714 if (ratio) {
1715 fprintf(stderr, ", compressed (original %.3f MB, ratio is %.3f)",
1716 rec->session->bytes_transferred / 1024.0 / 1024.0,
1717 ratio);
1718 }
1719 fprintf(stderr, " ]\n");
Namhyung Kime3d59112015-01-29 17:06:44 +09001720 }
1721
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001722out_delete_session:
Alexey Budankov5d7f4112019-03-18 20:43:35 +03001723 zstd_fini(&session->zstd_data);
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001724 perf_session__delete(session);
Song Liu657ee552019-03-11 22:30:50 -07001725
1726 if (!opts->no_bpf_event)
1727 perf_evlist__stop_sb_thread(sb_evlist);
Namhyung Kim45604712014-05-12 09:47:24 +09001728 return status;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001729}
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001730
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001731static void callchain_debug(struct callchain_param *callchain)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001732{
Kan Liangaad2b212015-01-05 13:23:04 -05001733 static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
Jiri Olsaa601fdf2014-02-03 12:44:43 +01001734
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001735 pr_debug("callchain: type %s\n", str[callchain->record_mode]);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001736
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001737 if (callchain->record_mode == CALLCHAIN_DWARF)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001738 pr_debug("callchain: stack dump size %d\n",
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001739 callchain->dump_size);
1740}
1741
1742int record_opts__parse_callchain(struct record_opts *record,
1743 struct callchain_param *callchain,
1744 const char *arg, bool unset)
1745{
1746 int ret;
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001747 callchain->enabled = !unset;
1748
1749 /* --no-call-graph */
1750 if (unset) {
1751 callchain->record_mode = CALLCHAIN_NONE;
1752 pr_debug("callchain: disabled\n");
1753 return 0;
1754 }
1755
1756 ret = parse_callchain_record_opt(arg, callchain);
1757 if (!ret) {
1758 /* Enable data address sampling for DWARF unwind. */
1759 if (callchain->record_mode == CALLCHAIN_DWARF)
1760 record->sample_address = true;
1761 callchain_debug(callchain);
1762 }
1763
1764 return ret;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001765}
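/*
 * Illustrative option strings this parser accepts (hypothetical values):
 *
 *   --call-graph fp            callchain->record_mode = CALLCHAIN_FP
 *   --call-graph dwarf,8192    CALLCHAIN_DWARF, dump_size = 8192, and
 *                              sample_address forced on above
 *   --no-call-graph            CALLCHAIN_NONE
 */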
1766
Kan Liangc421e802015-07-29 05:42:12 -04001767int record_parse_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001768 const char *arg,
1769 int unset)
1770{
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001771 return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
Jiri Olsa26d33022012-08-07 15:20:47 +02001772}
1773
Kan Liangc421e802015-07-29 05:42:12 -04001774int record_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001775 const char *arg __maybe_unused,
1776 int unset __maybe_unused)
1777{
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001778 struct callchain_param *callchain = opt->value;
Kan Liangc421e802015-07-29 05:42:12 -04001779
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001780 callchain->enabled = true;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001781
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001782 if (callchain->record_mode == CALLCHAIN_NONE)
1783 callchain->record_mode = CALLCHAIN_FP;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001784
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001785 callchain_debug(callchain);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001786 return 0;
1787}
1788
Jiri Olsaeb853e82014-02-03 12:44:42 +01001789static int perf_record_config(const char *var, const char *value, void *cb)
1790{
Namhyung Kim7a29c082015-12-15 10:49:56 +09001791 struct record *rec = cb;
1792
1793 if (!strcmp(var, "record.build-id")) {
1794 if (!strcmp(value, "cache"))
1795 rec->no_buildid_cache = false;
1796 else if (!strcmp(value, "no-cache"))
1797 rec->no_buildid_cache = true;
1798 else if (!strcmp(value, "skip"))
1799 rec->no_buildid = true;
1800 else
1801 return -1;
1802 return 0;
1803 }
Yisheng Xiecff17202018-03-12 19:25:57 +08001804 if (!strcmp(var, "record.call-graph")) {
1805 var = "call-graph.record-mode";
1806 return perf_default_config(var, value, cb);
1807 }
Alexey Budankov93f20c02018-11-06 12:07:19 +03001808#ifdef HAVE_AIO_SUPPORT
1809 if (!strcmp(var, "record.aio")) {
1810 rec->opts.nr_cblocks = strtol(value, NULL, 0);
1811 if (!rec->opts.nr_cblocks)
1812 rec->opts.nr_cblocks = nr_cblocks_default;
1813 }
1814#endif
Jiri Olsaeb853e82014-02-03 12:44:42 +01001815
Yisheng Xiecff17202018-03-12 19:25:57 +08001816 return 0;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001817}
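/*
 * Illustrative ~/.perfconfig snippet handled above (hypothetical values):
 *
 *   [record]
 *       build-id = no-cache    # rec->no_buildid_cache = true
 *       call-graph = dwarf     # forwarded as call-graph.record-mode
 *       aio = 4                # nr_cblocks, HAVE_AIO_SUPPORT builds only
 */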
1818
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001819struct clockid_map {
1820 const char *name;
1821 int clockid;
1822};
1823
1824#define CLOCKID_MAP(n, c) \
1825 { .name = n, .clockid = (c), }
1826
1827#define CLOCKID_END { .name = NULL, }
1828
1829
1830/*
1831 * Add the missing ones; we need to build on many distros...
1832 */
1833#ifndef CLOCK_MONOTONIC_RAW
1834#define CLOCK_MONOTONIC_RAW 4
1835#endif
1836#ifndef CLOCK_BOOTTIME
1837#define CLOCK_BOOTTIME 7
1838#endif
1839#ifndef CLOCK_TAI
1840#define CLOCK_TAI 11
1841#endif
1842
1843static const struct clockid_map clockids[] = {
1844 /* available for all events, NMI safe */
1845 CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
1846 CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),
1847
1848 /* available for some events */
1849 CLOCKID_MAP("realtime", CLOCK_REALTIME),
1850 CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
1851 CLOCKID_MAP("tai", CLOCK_TAI),
1852
1853 /* available for the lazy */
1854 CLOCKID_MAP("mono", CLOCK_MONOTONIC),
1855 CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
1856 CLOCKID_MAP("real", CLOCK_REALTIME),
1857 CLOCKID_MAP("boot", CLOCK_BOOTTIME),
1858
1859 CLOCKID_END,
1860};
1861
Alexey Budankovcf790512018-10-09 17:36:24 +03001862static int get_clockid_res(clockid_t clk_id, u64 *res_ns)
1863{
1864 struct timespec res;
1865
1866 *res_ns = 0;
1867 if (!clock_getres(clk_id, &res))
1868 *res_ns = res.tv_nsec + res.tv_sec * NSEC_PER_SEC;
1869 else
1870 pr_warning("WARNING: Failed to determine specified clock resolution.\n");
1871
1872 return 0;
1873}
1874
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001875static int parse_clockid(const struct option *opt, const char *str, int unset)
1876{
1877 struct record_opts *opts = (struct record_opts *)opt->value;
1878 const struct clockid_map *cm;
1879 const char *ostr = str;
1880
1881 if (unset) {
1882 opts->use_clockid = 0;
1883 return 0;
1884 }
1885
1886 /* no arg passed */
1887 if (!str)
1888 return 0;
1889
1890 /* no setting it twice */
1891 if (opts->use_clockid)
1892 return -1;
1893
1894 opts->use_clockid = true;
1895
1896 /* if it's a number, we're done */
1897 if (sscanf(str, "%d", &opts->clockid) == 1)
Alexey Budankovcf790512018-10-09 17:36:24 +03001898 return get_clockid_res(opts->clockid, &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001899
1900 /* allow a "CLOCK_" prefix to the name */
1901 if (!strncasecmp(str, "CLOCK_", 6))
1902 str += 6;
1903
1904 for (cm = clockids; cm->name; cm++) {
1905 if (!strcasecmp(str, cm->name)) {
1906 opts->clockid = cm->clockid;
Alexey Budankovcf790512018-10-09 17:36:24 +03001907 return get_clockid_res(opts->clockid,
1908 &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001909 }
1910 }
1911
1912 opts->use_clockid = false;
1913 ui__warning("unknown clockid %s, check man page\n", ostr);
1914 return -1;
1915}
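/*
 * Illustrative '-k' arguments accepted above (hypothetical values):
 *
 *   -k monotonic_raw     resolved through the clockids[] table
 *   -k CLOCK_BOOTTIME    the "CLOCK_" prefix is stripped first
 *   -k 4                 raw numeric ids are passed through as-is
 */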
1916
Alexey Budankovf4fe11b2019-01-22 20:52:03 +03001917static int record__parse_affinity(const struct option *opt, const char *str, int unset)
1918{
1919 struct record_opts *opts = (struct record_opts *)opt->value;
1920
1921 if (unset || !str)
1922 return 0;
1923
1924 if (!strcasecmp(str, "node"))
1925 opts->affinity = PERF_AFFINITY_NODE;
1926 else if (!strcasecmp(str, "cpu"))
1927 opts->affinity = PERF_AFFINITY_CPU;
1928
1929 return 0;
1930}
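/*
 * Illustrative example: '--affinity node' selects PERF_AFFINITY_NODE;
 * note that any other string silently keeps the default affinity.
 */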
1931
Adrian Huntere9db1312015-04-09 18:53:46 +03001932static int record__parse_mmap_pages(const struct option *opt,
1933 const char *str,
1934 int unset __maybe_unused)
1935{
1936 struct record_opts *opts = opt->value;
1937 char *s, *p;
1938 unsigned int mmap_pages;
1939 int ret;
1940
1941 if (!str)
1942 return -EINVAL;
1943
1944 s = strdup(str);
1945 if (!s)
1946 return -ENOMEM;
1947
1948 p = strchr(s, ',');
1949 if (p)
1950 *p = '\0';
1951
1952 if (*s) {
1953 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
1954 if (ret)
1955 goto out_free;
1956 opts->mmap_pages = mmap_pages;
1957 }
1958
1959 if (!p) {
1960 ret = 0;
1961 goto out_free;
1962 }
1963
1964 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
1965 if (ret)
1966 goto out_free;
1967
1968 opts->auxtrace_mmap_pages = mmap_pages;
1969
1970out_free:
1971 free(s);
1972 return ret;
1973}
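/*
 * Illustrative '-m' arguments parsed above (hypothetical values):
 *
 *   -m 512        512 data pages, AUX area mmap size left unset
 *   -m 512,128    512 data pages plus 128 AUX area tracing pages
 *   -m ,64        only the AUX area tracing mmap size is set
 */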
1974
Jiri Olsa0c582442017-01-09 10:51:59 +01001975static void switch_output_size_warn(struct record *rec)
1976{
1977 u64 wakeup_size = perf_evlist__mmap_size(rec->opts.mmap_pages);
1978 struct switch_output *s = &rec->switch_output;
1979
1980 wakeup_size /= 2;
1981
1982 if (s->size < wakeup_size) {
1983 char buf[100];
1984
1985 unit_number__scnprintf(buf, sizeof(buf), wakeup_size);
1986 pr_warning("WARNING: switch-output data size lower than "
1987 "wakeup kernel buffer size (%s); "
1988 "expect bigger perf.data sizes\n", buf);
1989 }
1990}
1991
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001992static int switch_output_setup(struct record *rec)
1993{
1994 struct switch_output *s = &rec->switch_output;
Jiri Olsadc0c6122017-01-09 10:51:58 +01001995 static struct parse_tag tags_size[] = {
1996 { .tag = 'B', .mult = 1 },
1997 { .tag = 'K', .mult = 1 << 10 },
1998 { .tag = 'M', .mult = 1 << 20 },
1999 { .tag = 'G', .mult = 1 << 30 },
2000 { .tag = 0 },
2001 };
Jiri Olsabfacbe32017-01-09 10:52:00 +01002002 static struct parse_tag tags_time[] = {
2003 { .tag = 's', .mult = 1 },
2004 { .tag = 'm', .mult = 60 },
2005 { .tag = 'h', .mult = 60*60 },
2006 { .tag = 'd', .mult = 60*60*24 },
2007 { .tag = 0 },
2008 };
Jiri Olsadc0c6122017-01-09 10:51:58 +01002009 unsigned long val;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002010
2011 if (!s->set)
2012 return 0;
2013
2014 if (!strcmp(s->str, "signal")) {
2015 s->signal = true;
2016 pr_debug("switch-output with SIGUSR2 signal\n");
Jiri Olsadc0c6122017-01-09 10:51:58 +01002017 goto enabled;
2018 }
2019
2020 val = parse_tag_value(s->str, tags_size);
2021 if (val != (unsigned long) -1) {
2022 s->size = val;
2023 pr_debug("switch-output with %s size threshold\n", s->str);
2024 goto enabled;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002025 }
2026
Jiri Olsabfacbe32017-01-09 10:52:00 +01002027 val = parse_tag_value(s->str, tags_time);
2028 if (val != (unsigned long) -1) {
2029 s->time = val;
2030 pr_debug("switch-output with %s time threshold (%lu seconds)\n",
2031 s->str, s->time);
2032 goto enabled;
2033 }
2034
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002035 return -1;
Jiri Olsadc0c6122017-01-09 10:51:58 +01002036
2037enabled:
2038 rec->timestamp_filename = true;
2039 s->enabled = true;
Jiri Olsa0c582442017-01-09 10:51:59 +01002040
2041 if (s->size && !rec->opts.no_buffering)
2042 switch_output_size_warn(rec);
2043
Jiri Olsadc0c6122017-01-09 10:51:58 +01002044 return 0;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002045}
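/*
 * Illustrative '--switch-output' values handled above (hypothetical):
 *
 *   --switch-output          defaults to "signal": rotate on SIGUSR2
 *   --switch-output=100M     s->size = 100 << 20 via tags_size
 *   --switch-output=30s      s->time = 30 via tags_time
 *
 * All three also force timestamped output file names.
 */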
2046
Namhyung Kime5b2c202014-10-23 00:15:46 +09002047static const char * const __record_usage[] = {
Mike Galbraith9e0967532009-05-28 16:25:34 +02002048 "perf record [<options>] [<command>]",
2049 "perf record [<options>] -- <command> [<options>]",
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002050 NULL
2051};
Namhyung Kime5b2c202014-10-23 00:15:46 +09002052const char * const *record_usage = __record_usage;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002053
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002054/*
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03002055 * XXX Ideally this would be local to cmd_record() and passed to a record__new
2056 * because we need access to it in record__exit, which is called
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002057 * after cmd_record() exits, but since record_options needs to be accessible to
2058 * builtin-script, leave it here.
2059 *
2060 * At least we don't touch it directly in all the other functions here.
2061 *
2062 * Just say no to tons of global variables, sigh.
2063 */
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03002064static struct record record = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002065 .opts = {
Andi Kleen8affc2b2014-07-31 14:45:04 +08002066 .sample_time = true,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002067 .mmap_pages = UINT_MAX,
2068 .user_freq = UINT_MAX,
2069 .user_interval = ULLONG_MAX,
Arnaldo Carvalho de Melo447a6012012-05-22 13:14:18 -03002070 .freq = 4000,
Namhyung Kimd1cb9fc2012-05-16 18:45:49 +09002071 .target = {
2072 .uses_mmap = true,
Adrian Hunter3aa59392013-11-15 15:52:29 +02002073 .default_per_cpu = true,
Namhyung Kimd1cb9fc2012-05-16 18:45:49 +09002074 },
Alexey Budankov470530b2019-03-18 20:40:26 +03002075 .mmap_flush = MMAP_FLUSH_DEFAULT,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002076 },
Namhyung Kime3d59112015-01-29 17:06:44 +09002077 .tool = {
2078 .sample = process_sample_event,
2079 .fork = perf_event__process_fork,
Adrian Huntercca84822015-08-19 17:29:21 +03002080 .exit = perf_event__process_exit,
Namhyung Kime3d59112015-01-29 17:06:44 +09002081 .comm = perf_event__process_comm,
Hari Bathinif3b36142017-03-08 02:11:43 +05302082 .namespaces = perf_event__process_namespaces,
Namhyung Kime3d59112015-01-29 17:06:44 +09002083 .mmap = perf_event__process_mmap,
2084 .mmap2 = perf_event__process_mmap2,
Adrian Huntercca84822015-08-19 17:29:21 +03002085 .ordered_events = true,
Namhyung Kime3d59112015-01-29 17:06:44 +09002086 },
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002087};
Frederic Weisbecker7865e812010-04-14 19:42:07 +02002088
Namhyung Kim76a26542015-10-22 23:28:32 +09002089const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
2090 "\n\t\t\t\tDefault: fp";
Arnaldo Carvalho de Melo61eaa3b2012-10-01 15:20:58 -03002091
Wang Nan0aab2132016-06-16 08:02:41 +00002092static bool dry_run;
2093
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002094/*
2095 * XXX Will stay a global variable until we fix builtin-script.c to stop messing
2096 * with it and switch to using the library functions in perf_evlist that came
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03002097 * from builtin-record.c, i.e. use record_opts,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002098 * perf_evlist__prepare_workload, etc instead of fork+exec'ing 'perf record',
2099 * using pipes, etc.
2100 */
Jiri Olsaefd21302017-01-03 09:19:55 +01002101static struct option __record_options[] = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002102 OPT_CALLBACK('e', "event", &record.evlist, "event",
Thomas Gleixner86847b62009-06-06 12:24:17 +02002103 "event selector. use 'perf list' to list available events",
Jiri Olsaf120f9d2011-07-14 11:25:32 +02002104 parse_events_option),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002105 OPT_CALLBACK(0, "filter", &record.evlist, "filter",
Li Zefanc171b552009-10-15 11:22:07 +08002106 "event filter", parse_filter),
Wang Nan4ba1faa2015-07-10 07:36:10 +00002107 OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
2108 NULL, "don't record events from perf itself",
2109 exclude_perf),
Namhyung Kimbea03402012-04-26 14:15:15 +09002110 OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03002111 "record events on existing process id"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002112 OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03002113 "record events on existing thread id"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002114 OPT_INTEGER('r', "realtime", &record.realtime_prio,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002115 "collect data with this RT SCHED_FIFO priority"),
Arnaldo Carvalho de Melo509051e2014-01-14 17:52:14 -03002116 OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
Kirill Smelkovacac03f2011-01-12 17:59:36 +03002117 "collect data without buffering"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002118 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
Frederic Weisbeckerdaac07b2009-08-13 10:27:19 +02002119 "collect raw sample records from all opened counters"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002120 OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002121 "system-wide collection from all CPUs"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002122 OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
Stephane Eranianc45c6ea2010-05-28 12:00:01 +02002123 "list of cpus to monitor"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002124 OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
Jiri Olsa2d4f2792019-02-21 10:41:30 +01002125 OPT_STRING('o', "output", &record.data.path, "file",
Ingo Molnarabaff322009-06-02 22:59:57 +02002126 "output file name"),
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02002127 OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
2128 &record.opts.no_inherit_set,
2129 "child tasks do not inherit counters"),
Wang Nan4ea648a2016-07-14 08:34:47 +00002130 OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
2131 "synthesize non-sample events at the end of output"),
Wang Nan626a6b72016-07-14 08:34:45 +00002132 OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
Song Liu71184c62019-03-11 22:30:37 -07002133 OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "do not record bpf events"),
Arnaldo Carvalho de Melob09c2362018-03-01 14:52:50 -03002134 OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
2135 "Fail if the specified frequency can't be used"),
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03002136 OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
2137 "profile at this frequency",
2138 record__parse_freq),
Adrian Huntere9db1312015-04-09 18:53:46 +03002139 OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
2140 "number of mmap data pages and AUX area tracing mmap pages",
2141 record__parse_mmap_pages),
Alexey Budankov470530b2019-03-18 20:40:26 +03002142 OPT_CALLBACK(0, "mmap-flush", &record.opts, "number",
2143 "Minimum number of bytes extracted from mmap data pages (default: 1)",
2144 record__mmap_flush_parse),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002145 OPT_BOOLEAN(0, "group", &record.opts.group,
Lin Ming43bece72011-08-17 18:42:07 +08002146 "put the counters into a counter group"),
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03002147 OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002148 NULL, "enables call-graph recording" ,
2149 &record_callchain_opt),
2150 OPT_CALLBACK(0, "call-graph", &record.opts,
Namhyung Kim76a26542015-10-22 23:28:32 +09002151 "record_mode[,record_size]", record_callchain_help,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002152 &record_parse_callchain_opt),
Ian Munsiec0555642010-04-13 18:37:33 +10002153 OPT_INCR('v', "verbose", &verbose,
Ingo Molnar3da297a2009-06-07 17:39:02 +02002154 "be more verbose (show counter open errors, etc)"),
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02002155 OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002156 OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02002157 "per thread counts"),
Peter Zijlstra56100322015-06-10 16:48:50 +02002158 OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
Kan Liang3b0a5da2017-08-29 13:11:08 -04002159 OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
2160 "Record the sample physical addresses"),
Jiri Olsab6f35ed2016-08-01 20:02:35 +02002161 OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
Adrian Hunter3abebc52015-07-06 14:51:01 +03002162 OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
2163 &record.opts.sample_time_set,
2164 "Record the sample timestamps"),
Jiri Olsaf290aa12018-02-01 09:38:11 +01002165 OPT_BOOLEAN_SET('P', "period", &record.opts.period, &record.opts.period_set,
2166 "Record the sample period"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002167 OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02002168 "don't sample"),
Wang Nand2db9a92016-01-25 09:56:19 +00002169 OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
2170 &record.no_buildid_cache_set,
2171 "do not update the buildid cache"),
2172 OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
2173 &record.no_buildid_set,
2174 "do not collect buildids in perf.data"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002175 OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
Stephane Eranian023695d2011-02-14 11:20:01 +02002176 "monitor event in cgroup name only",
2177 parse_cgroups),
Arnaldo Carvalho de Meloa6205a32014-01-14 17:58:12 -03002178 OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
Andi Kleen6619a532014-01-11 13:38:27 -08002179 "ms to wait before starting measurement after program start"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002180 OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
2181 "user to profile"),
Stephane Eraniana5aabda2012-03-08 23:47:45 +01002182
2183 OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
2184 "branch any", "sample any taken branches",
2185 parse_branch_stack),
2186
2187 OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
2188 "branch filter mask", "branch stack filter modes",
Roberto Agostino Vitillobdfebd82012-02-09 23:21:02 +01002189 parse_branch_stack),
Andi Kleen05484292013-01-24 16:10:29 +01002190 OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
2191 "sample by weight (on special events only)"),
Andi Kleen475eeab2013-09-20 07:40:43 -07002192 OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
2193 "sample transaction flags (special events only)"),
Adrian Hunter3aa59392013-11-15 15:52:29 +02002194 OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
2195 "use per-thread mmaps"),
Stephane Eranianbcc84ec2015-08-31 18:41:12 +02002196 OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
2197 "sample selected machine registers on interrupt,"
Kan Liangaeea9062019-05-14 13:19:32 -07002198 " use '-I?' to list register names", parse_intr_regs),
Andi Kleen84c41742017-09-05 10:00:28 -07002199 OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
2200 "sample selected machine registers in user space,"
Kan Liangaeea9062019-05-14 13:19:32 -07002201 " use '--user-regs=?' to list register names", parse_user_regs),
Andi Kleen85c273d2015-02-24 15:13:40 -08002202 OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
2203 "Record running/enabled time of read (:S) events"),
Peter Zijlstra814c8c32015-03-31 00:19:31 +02002204 OPT_CALLBACK('k', "clockid", &record.opts,
2205 "clockid", "clockid to use for events, see clock_gettime()",
2206 parse_clockid),
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002207 OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
2208 "opts", "AUX area tracing Snapshot Mode", ""),
Mark Drayton3fcb10e2018-12-04 12:34:20 -08002209 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
Kan Liang9d9cad72015-06-17 09:51:11 -04002210 "per thread proc mmap processing timeout in ms"),
Hari Bathinif3b36142017-03-08 02:11:43 +05302211 OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
2212 "Record namespaces events"),
Adrian Hunterb757bb02015-07-21 12:44:04 +03002213 OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
2214 "Record context switch events"),
Jiri Olsa85723882016-02-15 09:34:31 +01002215 OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
2216 "Configure all used events to run in kernel space.",
2217 PARSE_OPT_EXCLUSIVE),
2218 OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
2219 "Configure all used events to run in user space.",
2220 PARSE_OPT_EXCLUSIVE),
yuzhoujian53651b22019-05-30 14:29:22 +01002221 OPT_BOOLEAN(0, "kernel-callchains", &record.opts.kernel_callchains,
2222 "collect kernel callchains"),
2223 OPT_BOOLEAN(0, "user-callchains", &record.opts.user_callchains,
2224 "collect user callchains"),
Wang Nan71dc23262015-10-14 12:41:19 +00002225 OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
2226 "clang binary to use for compiling BPF scriptlets"),
2227 OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
2228 "options passed to clang when compiling BPF scriptlets"),
He Kuang7efe0e02015-12-14 10:39:23 +00002229 OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
2230 "file", "vmlinux pathname"),
Namhyung Kim61566812016-01-11 22:37:09 +09002231 OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
2232 "Record build-id of all DSOs regardless of hits"),
Wang Nanecfd7a92016-04-13 08:21:07 +00002233 OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
2234 "append timestamp to output filename"),
Jin Yao68588ba2017-12-08 21:13:42 +08002235 OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
2236 "Record timestamp boundary (time of first/last samples)"),
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002237 OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
Andi Kleenc38dab72019-03-14 15:49:56 -07002238 &record.switch_output.set, "signal or size[BKMG] or time[smhd]",
2239 "Switch output when receiving SIGUSR2 (signal) or crossing a size or time threshold",
Jiri Olsadc0c6122017-01-09 10:51:58 +01002240 "signal"),
Andi Kleen03724b22019-03-14 15:49:55 -07002241 OPT_INTEGER(0, "switch-max-files", &record.switch_output.num_files,
2242 "Limit number of switch output generated files"),
Wang Nan0aab2132016-06-16 08:02:41 +00002243 OPT_BOOLEAN(0, "dry-run", &dry_run,
2244 "Parse options then exit"),
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002245#ifdef HAVE_AIO_SUPPORT
Alexey Budankov93f20c02018-11-06 12:07:19 +03002246 OPT_CALLBACK_OPTARG(0, "aio", &record.opts,
2247 &nr_cblocks_default, "n", "Use <n> control blocks in asynchronous trace writing mode (default: 1, max: 4)",
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002248 record__aio_parse),
2249#endif
Alexey Budankovf4fe11b2019-01-22 20:52:03 +03002250 OPT_CALLBACK(0, "affinity", &record.opts, "node|cpu",
2251 "Set affinity mask of trace reading thread to NUMA node cpu mask or cpu of processed mmap buffer",
2252 record__parse_affinity),
Alexey Budankov504c1ad2019-03-18 20:44:42 +03002253#ifdef HAVE_ZSTD_SUPPORT
2254 OPT_CALLBACK_OPTARG('z', "compression-level", &record.opts, &comp_level_default,
2255 "n", "Compress records using the specified level (default: 1 - fastest compression, 22 - greatest compression)",
2256 record__parse_comp_level),
2257#endif
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002258 OPT_END()
2259};
2260
Namhyung Kime5b2c202014-10-23 00:15:46 +09002261struct option *record_options = __record_options;
2262
Arnaldo Carvalho de Melob0ad8ea2017-03-27 11:47:20 -03002263int cmd_record(int argc, const char **argv)
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002264{
Adrian Hunteref149c22015-04-09 18:53:45 +03002265 int err;
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03002266 struct record *rec = &record;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002267 char errbuf[BUFSIZ];
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002268
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03002269 setlocale(LC_ALL, "");
2270
Wang Nan48e1cab2015-12-14 10:39:22 +00002271#ifndef HAVE_LIBBPF_SUPPORT
2272# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
2273 set_nobuild('\0', "clang-path", true);
2274 set_nobuild('\0', "clang-opt", true);
2275# undef set_nobuild
2276#endif
2277
He Kuang7efe0e02015-12-14 10:39:23 +00002278#ifndef HAVE_BPF_PROLOGUE
2279# if !defined (HAVE_DWARF_SUPPORT)
2280# define REASON "NO_DWARF=1"
2281# elif !defined (HAVE_LIBBPF_SUPPORT)
2282# define REASON "NO_LIBBPF=1"
2283# else
2284# define REASON "this architecture doesn't support BPF prologue"
2285# endif
2286# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
2287 set_nobuild('\0', "vmlinux", true);
2288# undef set_nobuild
2289# undef REASON
2290#endif
2291
Alexey Budankov9d2ed642019-01-22 20:47:43 +03002292 CPU_ZERO(&rec->affinity_mask);
2293 rec->opts.affinity = PERF_AFFINITY_SYS;
2294
Jiri Olsa0f98b112019-07-21 13:23:55 +02002295 rec->evlist = evlist__new();
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03002296 if (rec->evlist == NULL)
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -02002297 return -ENOMEM;
2298
Arnaldo Carvalho de Meloecc4c562017-01-24 13:44:10 -03002299 err = perf_config(perf_record_config, rec);
2300 if (err)
2301 return err;
Jiri Olsaeb853e82014-02-03 12:44:42 +01002302
Tom Zanussibca647a2010-11-10 08:11:30 -06002303 argc = parse_options(argc, argv, record_options, record_usage,
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02002304 PARSE_OPT_STOP_AT_NON_OPTION);
Namhyung Kim68ba3232017-02-17 17:17:42 +09002305 if (quiet)
2306 perf_quiet_option();
Jiri Olsa483635a2017-02-17 18:00:18 +01002307
2308 /* Make system wide (-a) the default target. */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002309 if (!argc && target__none(&rec->opts.target))
Jiri Olsa483635a2017-02-17 18:00:18 +01002310 rec->opts.target.system_wide = true;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002311
Namhyung Kimbea03402012-04-26 14:15:15 +09002312 if (nr_cgroups && !rec->opts.target.system_wide) {
Namhyung Kimc7118362015-10-25 00:49:27 +09002313 usage_with_options_msg(record_usage, record_options,
2314 "cgroup monitoring only available in system-wide mode");
2315
Stephane Eranian023695d2011-02-14 11:20:01 +02002316 }
Alexey Budankov504c1ad2019-03-18 20:44:42 +03002317
2318 if (rec->opts.comp_level != 0) {
2319 pr_debug("Compression enabled, disabling build id collection at the end of the session.\n");
2320 rec->no_buildid = true;
2321 }
2322
Adrian Hunterb757bb02015-07-21 12:44:04 +03002323 if (rec->opts.record_switch_events &&
2324 !perf_can_record_switch_events()) {
Namhyung Kimc7118362015-10-25 00:49:27 +09002325 ui__error("kernel does not support recording context switch events\n");
2326 parse_options_usage(record_usage, record_options, "switch-events", 0);
2327 return -EINVAL;
Adrian Hunterb757bb02015-07-21 12:44:04 +03002328 }
Stephane Eranian023695d2011-02-14 11:20:01 +02002329
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002330 if (switch_output_setup(rec)) {
2331 parse_options_usage(record_usage, record_options, "switch-output", 0);
2332 return -EINVAL;
2333 }
2334
Jiri Olsabfacbe32017-01-09 10:52:00 +01002335 if (rec->switch_output.time) {
2336 signal(SIGALRM, alarm_sig_handler);
2337 alarm(rec->switch_output.time);
2338 }
2339
Andi Kleen03724b22019-03-14 15:49:55 -07002340 if (rec->switch_output.num_files) {
2341 rec->switch_output.filenames = calloc(sizeof(char *),
2342 rec->switch_output.num_files);
2343 if (!rec->switch_output.filenames)
2344 return -EINVAL;
2345 }
2346
Adrian Hunter1b36c032016-09-23 17:38:39 +03002347 /*
2348 * Allow aliases to facilitate the lookup of symbols for address
2349 * filters. Refer to auxtrace_parse_filters().
2350 */
2351 symbol_conf.allow_aliases = true;
2352
2353 symbol__init(NULL);
2354
Adrian Hunter4b5ea3b2018-03-06 11:13:12 +02002355 err = record__auxtrace_init(rec);
Adrian Hunter1b36c032016-09-23 17:38:39 +03002356 if (err)
2357 goto out;
2358
Wang Nan0aab2132016-06-16 08:02:41 +00002359 if (dry_run)
Adrian Hunter5c01ad602016-09-23 17:38:37 +03002360 goto out;
Wang Nan0aab2132016-06-16 08:02:41 +00002361
Wang Nand7888572016-04-08 15:07:24 +00002362 err = bpf__setup_stdout(rec->evlist);
2363 if (err) {
2364 bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
2365 pr_err("ERROR: Setup BPF stdout failed: %s\n",
2366 errbuf);
Adrian Hunter5c01ad602016-09-23 17:38:37 +03002367 goto out;
Wang Nand7888572016-04-08 15:07:24 +00002368 }
2369
Adrian Hunteref149c22015-04-09 18:53:45 +03002370 err = -ENOMEM;
2371
Arnaldo Carvalho de Melo6c443952017-11-14 11:03:19 -03002372 if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(rec->evlist))
Arnaldo Carvalho de Melo646aaea2011-05-27 11:00:41 -03002373 pr_warning(
2374"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
2375"check /proc/sys/kernel/kptr_restrict.\n\n"
2376"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
2377"file is not found in the buildid cache or in the vmlinux path.\n\n"
2378"Samples in kernel modules won't be resolved at all.\n\n"
2379"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
2380"even with a suitable vmlinux or kallsyms file.\n\n");
Arnaldo Carvalho de Meloec80fde2011-05-26 09:53:51 -03002381
Wang Nan0c1d46a2016-04-20 18:59:52 +00002382 if (rec->no_buildid_cache || rec->no_buildid) {
Stephane Eraniana1ac1d32010-06-17 11:39:01 +02002383 disable_buildid_cache();
Jiri Olsadc0c6122017-01-09 10:51:58 +01002384 } else if (rec->switch_output.enabled) {
Wang Nan0c1d46a2016-04-20 18:59:52 +00002385 /*
2386 * In 'perf record --switch-output', disable buildid
2387 * generation by default to reduce data file switching
2388 * overhead. Still generate buildids if they are
2389 * explicitly required using
2390 *
Jiri Olsa60437ac2017-01-03 09:19:56 +01002391 * perf record --switch-output --no-no-buildid \
Wang Nan0c1d46a2016-04-20 18:59:52 +00002392 * --no-no-buildid-cache
2393 *
2394 * The following code is equivalent to:
2395 *
2396 * if ((rec->no_buildid || !rec->no_buildid_set) &&
2397 * (rec->no_buildid_cache || !rec->no_buildid_cache_set))
2398 * disable_buildid_cache();
2399 */
2400 bool disable = true;
2401
2402 if (rec->no_buildid_set && !rec->no_buildid)
2403 disable = false;
2404 if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
2405 disable = false;
2406 if (disable) {
2407 rec->no_buildid = true;
2408 rec->no_buildid_cache = true;
2409 disable_buildid_cache();
2410 }
2411 }
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02002412
Wang Nan4ea648a2016-07-14 08:34:47 +00002413 if (record.opts.overwrite)
2414 record.opts.tail_synthesize = true;
2415
Jiri Olsa6484d2f2019-07-21 13:24:28 +02002416 if (rec->evlist->core.nr_entries == 0 &&
Arnaldo Carvalho de Melo4b4cd502017-07-03 13:26:32 -03002417 __perf_evlist__add_default(rec->evlist, !record.opts.no_samples) < 0) {
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02002418 pr_err("Not enough memory for event selector list\n");
Adrian Hunter394c01e2016-09-23 17:38:36 +03002419 goto out;
Peter Zijlstrabbd36e52009-06-11 23:11:50 +02002420 }
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002421
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02002422 if (rec->opts.target.tid && !rec->opts.no_inherit_set)
2423 rec->opts.no_inherit = true;
2424
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002425 err = target__validate(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002426 if (err) {
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002427 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Jiri Olsac3dec272018-02-06 19:17:58 +01002428 ui__warning("%s\n", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002429 }
Namhyung Kim4bd0f2d2012-04-26 14:15:18 +09002430
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002431 err = target__parse_uid(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002432 if (err) {
2433 int saved_errno = errno;
2434
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002435 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Namhyung Kim3780f482012-05-29 13:22:57 +09002436 ui__error("%s", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002437
2438 err = -saved_errno;
Adrian Hunter394c01e2016-09-23 17:38:36 +03002439 goto out;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002440 }
Arnaldo Carvalho de Melo0d37aa32012-01-19 14:08:15 -02002441
Mengting Zhangca800062017-12-13 15:01:53 +08002442 /* Enable ignoring missing threads when the -u/-p option is specified. */
2443 rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;
Jiri Olsa23dc4f12016-12-12 11:35:43 +01002444
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002445 err = -ENOMEM;
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03002446 if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
Arnaldo Carvalho de Melodd7927f2011-01-12 14:28:51 -02002447 usage_with_options(record_usage, record_options);
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02002448
Adrian Hunteref149c22015-04-09 18:53:45 +03002449 err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
2450 if (err)
Adrian Hunter394c01e2016-09-23 17:38:36 +03002451 goto out;
Adrian Hunteref149c22015-04-09 18:53:45 +03002452
Namhyung Kim61566812016-01-11 22:37:09 +09002453 /*
2454 * We take all buildids when the file contains
2455 * AUX area tracing data because we do not decode the
2456 * trace, which would take too long.
2457 */
2458 if (rec->opts.full_auxtrace)
2459 rec->buildid_all = true;
2460
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03002461 if (record_opts__config(&rec->opts)) {
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03002462 err = -EINVAL;
Adrian Hunter394c01e2016-09-23 17:38:36 +03002463 goto out;
Mike Galbraith7e4ff9e2009-10-12 07:56:03 +02002464 }
2465
Alexey Budankov93f20c02018-11-06 12:07:19 +03002466 if (rec->opts.nr_cblocks > nr_cblocks_max)
2467 rec->opts.nr_cblocks = nr_cblocks_max;
Alexey Budankov5d7f4112019-03-18 20:43:35 +03002468 pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks);
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002469
Alexey Budankov9d2ed642019-01-22 20:47:43 +03002470 pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
Alexey Budankov470530b2019-03-18 20:40:26 +03002471 pr_debug("mmap flush: %d\n", rec->opts.mmap_flush);
Alexey Budankov9d2ed642019-01-22 20:47:43 +03002472
Alexey Budankov51255a82019-03-18 20:42:19 +03002473 if (rec->opts.comp_level > comp_level_max)
2474 rec->opts.comp_level = comp_level_max;
2475 pr_debug("comp level: %d\n", rec->opts.comp_level);
2476
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002477 err = __cmd_record(&record, argc, argv);
Adrian Hunter394c01e2016-09-23 17:38:36 +03002478out:
Jiri Olsac12995a2019-07-21 13:23:56 +02002479 evlist__delete(rec->evlist);
Arnaldo Carvalho de Melod65a4582010-07-30 18:31:28 -03002480 symbol__exit();
Adrian Hunteref149c22015-04-09 18:53:45 +03002481 auxtrace_record__free(rec->itr);
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03002482 return err;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002483}
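/*
 * Illustrative invocation exercising several of the paths above
 * (hypothetical command line):
 *
 *   perf record -a -g -k monotonic_raw --switch-output=1G -- sleep 60
 *
 * i.e. system-wide sampling with frame-pointer callchains, timestamps
 * from CLOCK_MONOTONIC_RAW, and output rotated per gigabyte.
 */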
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002484
2485static void snapshot_sig_handler(int sig __maybe_unused)
2486{
Jiri Olsadc0c6122017-01-09 10:51:58 +01002487 struct record *rec = &record;
2488
Wang Nan5f9cf592016-04-20 18:59:49 +00002489 if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
2490 trigger_hit(&auxtrace_snapshot_trigger);
2491 auxtrace_record__snapshot_started = 1;
2492 if (auxtrace_record__snapshot_start(record.itr))
2493 trigger_error(&auxtrace_snapshot_trigger);
2494 }
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002495
Jiri Olsadc0c6122017-01-09 10:51:58 +01002496 if (switch_output_signal(rec))
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002497 trigger_hit(&switch_output_trigger);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002498}
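/*
 * Illustrative note: with '-S --switch-output=signal' (hypothetical
 * combination) a single SIGUSR2 services both triggers here, snapshotting
 * the AUX area and rotating the output on the next main-loop pass.
 */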
Jiri Olsabfacbe32017-01-09 10:52:00 +01002499
2500static void alarm_sig_handler(int sig __maybe_unused)
2501{
2502 struct record *rec = &record;
2503
2504 if (switch_output_time(rec))
2505 trigger_hit(&switch_output_trigger);
2506}