blob: 1447004eee8accfc57aa161d7a87b477f63f0b6b [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Ingo Molnarabaff322009-06-02 22:59:57 +02002/*
Ingo Molnarbf9e1872009-06-02 23:37:05 +02003 * builtin-record.c
4 *
5 * Builtin record command: Record the profile of a workload
6 * (or a CPU, or a PID) into the perf.data output file - for
7 * later analysis via perf report.
Ingo Molnarabaff322009-06-02 22:59:57 +02008 */
Ingo Molnar16f762a2009-05-27 09:10:38 +02009#include "builtin.h"
Ingo Molnarbf9e1872009-06-02 23:37:05 +020010
Arnaldo Carvalho de Melo6122e4e2010-02-03 16:52:05 -020011#include "util/build-id.h"
Josh Poimboeuf4b6ab942015-12-15 09:39:39 -060012#include <subcmd/parse-options.h>
Ingo Molnar8ad8db32009-05-26 11:10:09 +020013#include "util/parse-events.h"
Taeung Song41840d22016-06-23 17:55:17 +090014#include "util/config.h"
Thomas Gleixner6eda5832009-05-01 18:29:57 +020015
Arnaldo Carvalho de Melo8f651ea2014-10-09 16:12:24 -030016#include "util/callchain.h"
Arnaldo Carvalho de Melof14d5702014-10-17 12:17:40 -030017#include "util/cgroup.h"
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020018#include "util/header.h"
Frederic Weisbecker66e274f2009-08-12 11:07:25 +020019#include "util/event.h"
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -020020#include "util/evlist.h"
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -020021#include "util/evsel.h"
Frederic Weisbecker8f288272009-08-16 22:05:48 +020022#include "util/debug.h"
Arnaldo Carvalho de Meloaeb00b12019-08-22 15:40:29 -030023#include "util/target.h"
Arnaldo Carvalho de Melo94c744b2009-12-11 21:24:02 -020024#include "util/session.h"
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -020025#include "util/tool.h"
Arnaldo Carvalho de Melo8d063672009-11-04 18:50:43 -020026#include "util/symbol.h"
Arnaldo Carvalho de Meloaeb00b12019-08-22 15:40:29 -030027#include "util/record.h"
Paul Mackerrasa12b51c2010-03-10 20:36:09 +110028#include "util/cpumap.h"
Arnaldo Carvalho de Melofd782602011-01-18 15:15:24 -020029#include "util/thread_map.h"
Jiri Olsaf5fc14122013-10-15 16:27:32 +020030#include "util/data.h"
Stephane Eranianbcc84ec2015-08-31 18:41:12 +020031#include "util/perf_regs.h"
Adrian Hunteref149c22015-04-09 18:53:45 +030032#include "util/auxtrace.h"
Adrian Hunter46bc29b2016-03-08 10:38:44 +020033#include "util/tsc.h"
Andi Kleenf00898f2015-05-27 10:51:51 -070034#include "util/parse-branch-options.h"
Stephane Eranianbcc84ec2015-08-31 18:41:12 +020035#include "util/parse-regs-options.h"
Wang Nan71dc23262015-10-14 12:41:19 +000036#include "util/llvm-utils.h"
Wang Nan8690a2a2016-02-22 09:10:32 +000037#include "util/bpf-loader.h"
Wang Nan5f9cf592016-04-20 18:59:49 +000038#include "util/trigger.h"
Wang Nana0748652016-11-26 07:03:28 +000039#include "util/perf-hooks.h"
Alexey Budankovf13de662019-01-22 20:50:57 +030040#include "util/cpu-set-sched.h"
Arnaldo Carvalho de Meloc5e40272017-04-19 16:12:39 -030041#include "util/time-utils.h"
Arnaldo Carvalho de Melo58db1d62017-04-19 16:05:56 -030042#include "util/units.h"
Song Liu7b612e22019-01-17 08:15:19 -080043#include "util/bpf-event.h"
Wang Nand8871ea2016-02-26 09:32:06 +000044#include "asm/bug.h"
Arnaldo Carvalho de Meloc1a604d2019-08-29 15:20:59 -030045#include "perf.h"
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020046
Arnaldo Carvalho de Meloa43783a2017-04-18 10:46:11 -030047#include <errno.h>
Arnaldo Carvalho de Melofd20e812017-04-17 15:23:08 -030048#include <inttypes.h>
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -030049#include <locale.h>
Arnaldo Carvalho de Melo42087352017-04-19 19:06:30 -030050#include <poll.h>
Peter Zijlstra97124d5e2009-06-02 15:52:24 +020051#include <unistd.h>
Peter Zijlstrade9ac072009-04-08 15:01:31 +020052#include <sched.h>
Arnaldo Carvalho de Melo9607ad32017-04-19 15:49:18 -030053#include <signal.h>
Arnaldo Carvalho de Meloa41794c2010-05-18 18:29:23 -030054#include <sys/mman.h>
Arnaldo Carvalho de Melo42087352017-04-19 19:06:30 -030055#include <sys/wait.h>
Arnaldo Carvalho de Melo8520a982019-08-29 16:18:59 -030056#include <linux/string.h>
Arnaldo Carvalho de Melo0693e682016-08-08 15:05:46 -030057#include <linux/time64.h>
Arnaldo Carvalho de Melod8f9da22019-07-04 12:06:20 -030058#include <linux/zalloc.h>
Bernhard Rosenkraenzer78da39f2012-10-08 09:43:26 +030059
/*
 * Parsed state of the --switch-output option: when and how the output
 * file is rotated during a recording session.
 */
struct switch_output {
	bool		 enabled;	/* any rotation mode is active */
	bool		 signal;	/* rotate on signal delivery (see switch_output_signal()) */
	unsigned long	 size;		/* rotate after this many bytes written (0 = off) */
	unsigned long	 time;		/* rotate on a time period (0 = off) */
	const char	*str;		/* raw option argument as given on the command line */
	bool		 set;		/* option was explicitly supplied */
	char		**filenames;	/* ring of previously generated output file names */
	int		 num_files;	/* capacity of filenames[] */
	int		 cur_file;	/* index of the slot to reuse next */
};
71
/*
 * Everything the 'perf record' session needs: the tool callbacks, the
 * parsed options, the output data file and the event list being recorded.
 */
struct record {
	struct perf_tool	tool;		/* event-processing callbacks */
	struct record_opts	opts;		/* parsed command line options */
	u64			bytes_written;	/* accounted in record__write()/record__aio_push() */
	struct perf_data	data;		/* perf.data output abstraction */
	struct auxtrace_record	*itr;		/* AUX area tracing state, NULL unless initialized */
	struct evlist		*evlist;	/* events being recorded */
	struct perf_session	*session;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_set;		/* no_buildid explicitly configured */
	bool			no_buildid_cache;
	bool			no_buildid_cache_set;	/* no_buildid_cache explicitly configured */
	bool			buildid_all;	/* take all DSOs, skip per-sample marking */
	bool			timestamp_filename;
	bool			timestamp_boundary;
	struct switch_output	switch_output;	/* output rotation configuration */
	unsigned long long	samples;	/* samples seen, for the end-of-run summary */
	cpu_set_t		affinity_mask;
};
Ingo Molnara21ca2c2009-06-06 09:58:57 +020092
/*
 * Non-zero once an AUX area snapshot has been started but not yet read;
 * checked in record__auxtrace_snapshot_exit() (set elsewhere in this file).
 */
static volatile int auxtrace_record__snapshot_started;
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
static DEFINE_TRIGGER(switch_output_trigger);

/* Human-readable labels indexed by the PERF_AFFINITY_* enum values. */
static const char *affinity_tags[PERF_AFFINITY_MAX] = {
	"SYS", "NODE", "CPU"
};
100
Jiri Olsadc0c6122017-01-09 10:51:58 +0100101static bool switch_output_signal(struct record *rec)
102{
103 return rec->switch_output.signal &&
104 trigger_is_ready(&switch_output_trigger);
105}
106
107static bool switch_output_size(struct record *rec)
108{
109 return rec->switch_output.size &&
110 trigger_is_ready(&switch_output_trigger) &&
111 (rec->bytes_written >= rec->switch_output.size);
112}
113
Jiri Olsabfacbe32017-01-09 10:52:00 +0100114static bool switch_output_time(struct record *rec)
115{
116 return rec->switch_output.time &&
117 trigger_is_ready(&switch_output_trigger);
118}
119
/*
 * Write @size bytes at @bf to the output file and account them in
 * rec->bytes_written.  @map is unused here; it is part of the common
 * push-callback signature shared with record__pushfn().  Fires the
 * switch-output trigger once the configured size limit is crossed.
 *
 * Return: 0 on success, -1 on write error.
 */
static int record__write(struct record *rec, struct perf_mmap *map __maybe_unused,
			 void *bf, size_t size)
{
	struct perf_data_file *file = &rec->session->data->file;

	if (perf_data_file__write(file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	rec->bytes_written += size;

	if (switch_output_size(rec))
		trigger_hit(&switch_output_trigger);

	return 0;
}
137
/* Forward declarations: these helpers are defined later in this file. */
static int record__aio_enabled(struct record *rec);
static int record__comp_enabled(struct record *rec);
static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
			    void *src, size_t src_size);
142
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300143#ifdef HAVE_AIO_SUPPORT
/*
 * Queue an asynchronous write of @size bytes at @buf to @trace_fd at file
 * offset @off, retrying while aio_write() fails with EAGAIN.  On any other
 * failure the control block is marked free (aio_fildes = -1) and an error
 * is logged.
 *
 * Return: 0 when queued, -1 on failure (rc of the last aio_write()).
 */
static int record__aio_write(struct aiocb *cblock, int trace_fd,
			     void *buf, size_t size, off_t off)
{
	int rc;

	cblock->aio_fildes = trace_fd;
	cblock->aio_buf = buf;
	cblock->aio_nbytes = size;
	cblock->aio_offset = off;
	cblock->aio_sigevent.sigev_notify = SIGEV_NONE;

	for (;;) {
		rc = aio_write(cblock);
		if (rc == 0)
			break;
		if (errno != EAGAIN) {
			/* Hard failure: release the control block and give up. */
			cblock->aio_fildes = -1;
			pr_err("failed to queue perf data, error: %m\n");
			break;
		}
	}

	return rc;
}
168
/*
 * Poll one aio control block for completion.  If the request is still in
 * flight, do nothing.  If it completed partially, restart the write for
 * the remainder.  Only when the whole buffer has been written is the
 * control block released and the mmap refcount dropped.
 *
 * Return: 1 when the control block is now free, 0 when the request is
 * still in progress (or was restarted for the remainder).
 */
static int record__aio_complete(struct perf_mmap *md, struct aiocb *cblock)
{
	void *rem_buf;
	off_t rem_off;
	size_t rem_size;
	int rc, aio_errno;
	ssize_t aio_ret, written;

	aio_errno = aio_error(cblock);
	if (aio_errno == EINPROGRESS)
		return 0;

	written = aio_ret = aio_return(cblock);
	if (aio_ret < 0) {
		if (aio_errno != EINTR)
			pr_err("failed to write perf data, error: %m\n");
		/* treat a failed request as "nothing written" so it is retried */
		written = 0;
	}

	rem_size = cblock->aio_nbytes - written;

	if (rem_size == 0) {
		cblock->aio_fildes = -1;
		/*
		 * md->refcount is incremented in record__aio_pushfn() for
		 * every aio write request started in record__aio_push() so
		 * decrement it because the request is now complete.
		 */
		perf_mmap__put(md);
		rc = 1;
	} else {
		/*
		 * aio write request may require restart with the
		 * reminder if the kernel didn't write whole
		 * chunk at once.
		 */
		rem_off = cblock->aio_offset + written;
		rem_buf = (void *)(cblock->aio_buf + written);
		record__aio_write(cblock, cblock->aio_fildes,
				rem_buf, rem_size, rem_off);
		rc = 0;
	}

	return rc;
}
214
/*
 * Wait for outstanding aio writes on @md.  With @sync_all false, return
 * as soon as any control block becomes free (its index is the return
 * value) so it can be reused for the next request.  With @sync_all true,
 * keep suspending until every request has completed and return -1.
 */
static int record__aio_sync(struct perf_mmap *md, bool sync_all)
{
	struct aiocb **aiocb = md->aio.aiocb;
	struct aiocb *cblocks = md->aio.cblocks;
	struct timespec timeout = { 0, 1000 * 1000 * 1 }; /* 1ms */
	int i, do_suspend;

	do {
		do_suspend = 0;
		for (i = 0; i < md->aio.nr_cblocks; ++i) {
			if (cblocks[i].aio_fildes == -1 || record__aio_complete(md, &cblocks[i])) {
				if (sync_all)
					aiocb[i] = NULL;
				else
					return i;
			} else {
				/*
				 * Started aio write is not complete yet
				 * so it has to be waited before the
				 * next allocation.
				 */
				aiocb[i] = &cblocks[i];
				do_suspend = 1;
			}
		}
		if (!do_suspend)
			return -1;

		/* Block (1ms at a time) until at least one request finishes. */
		while (aio_suspend((const struct aiocb **)aiocb, md->aio.nr_cblocks, &timeout)) {
			if (!(errno == EAGAIN || errno == EINTR))
				pr_err("failed to sync perf data, error: %m\n");
		}
	} while (1);
}
249
/* Per-push context threaded through record__aio_pushfn() via its 'to' arg. */
struct record_aio {
	struct record	*rec;	/* owning record session */
	void		*data;	/* destination map->aio.data[] buffer */
	size_t		size;	/* bytes accumulated in data so far */
};
255
/*
 * perf_mmap__push() callback: stage (and optionally zstd-compress) one
 * chunk of ring-buffer data into the aio buffer described by @to (a
 * struct record_aio).  Takes a refcount on @map for the first chunk so
 * the buffer outlives the in-flight aio write.
 *
 * Return: number of bytes staged (after compression, if enabled).
 */
static int record__aio_pushfn(struct perf_mmap *map, void *to, void *buf, size_t size)
{
	struct record_aio *aio = to;

	/*
	 * map->base data pointed by buf is copied into free map->aio.data[] buffer
	 * to release space in the kernel buffer as fast as possible, calling
	 * perf_mmap__consume() from perf_mmap__push() function.
	 *
	 * That lets the kernel to proceed with storing more profiling data into
	 * the kernel buffer earlier than other per-cpu kernel buffers are handled.
	 *
	 * Copying can be done in two steps in case the chunk of profiling data
	 * crosses the upper bound of the kernel buffer. In this case we first move
	 * part of data from map->start till the upper bound and then the remainder
	 * from the beginning of the kernel buffer till the end of the data chunk.
	 */

	if (record__comp_enabled(aio->rec)) {
		size = zstd_compress(aio->rec->session, aio->data + aio->size,
				     perf_mmap__mmap_len(map) - aio->size,
				     buf, size);
	} else {
		memcpy(aio->data + aio->size, buf, size);
	}

	if (!aio->size) {
		/*
		 * Increment map->refcount to guard map->aio.data[] buffer
		 * from premature deallocation because map object can be
		 * released earlier than aio write request started on
		 * map->aio.data[] buffer is complete.
		 *
		 * perf_mmap__put() is done at record__aio_complete()
		 * after started aio request completion or at record__aio_push()
		 * if the request failed to start.
		 */
		perf_mmap__get(map);
	}

	aio->size += size;

	return size;
}
300
/*
 * Drain @map into an aio buffer and queue an asynchronous write of it at
 * file offset *@off, advancing *@off and rec->bytes_written on success.
 *
 * Return: 0 when the aio write was queued, > 0 when there was no data,
 * < 0 on error (from perf_mmap__push() or record__aio_write()).
 */
static int record__aio_push(struct record *rec, struct perf_mmap *map, off_t *off)
{
	int ret, idx;
	int trace_fd = rec->session->data->file.fd;
	struct record_aio aio = { .rec = rec, .size = 0 };

	/*
	 * Call record__aio_sync() to wait till map->aio.data[] buffer
	 * becomes available after previous aio write operation.
	 */

	idx = record__aio_sync(map, false);
	aio.data = map->aio.data[idx];
	ret = perf_mmap__push(map, &aio, record__aio_pushfn);
	if (ret != 0) /* ret > 0 - no data, ret < 0 - error */
		return ret;

	rec->samples++;
	ret = record__aio_write(&(map->aio.cblocks[idx]), trace_fd, aio.data, aio.size, *off);
	if (!ret) {
		*off += aio.size;
		rec->bytes_written += aio.size;
		if (switch_output_size(rec))
			trigger_hit(&switch_output_trigger);
	} else {
		/*
		 * Decrement map->refcount incremented in record__aio_pushfn()
		 * back if record__aio_write() operation failed to start, otherwise
		 * map->refcount is decremented in record__aio_complete() after
		 * aio write operation finishes successfully.
		 */
		perf_mmap__put(map);
	}

	return ret;
}
337
/* Report the current file position of @trace_fd (or -1 on lseek error). */
static off_t record__aio_get_pos(int trace_fd)
{
	off_t pos = lseek(trace_fd, 0, SEEK_CUR);

	return pos;
}
342
/* Reposition @trace_fd to absolute offset @pos; lseek errors are ignored. */
static void record__aio_set_pos(int trace_fd, off_t pos)
{
	(void)lseek(trace_fd, pos, SEEK_SET);
}
347
/*
 * Wait for every in-flight aio write on every mapped ring buffer to
 * complete.  No-op when aio mode is disabled.
 */
static void record__aio_mmap_read_sync(struct record *rec)
{
	int i;
	struct evlist *evlist = rec->evlist;
	struct perf_mmap *maps = evlist->mmap;

	if (!record__aio_enabled(rec))
		return;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		struct perf_mmap *map = &maps[i];

		if (map->base)
			record__aio_sync(map, true);
	}
}
364
static int nr_cblocks_default = 1;
static int nr_cblocks_max = 4;

/*
 * Option callback for the aio setting: --no-... clears nr_cblocks,
 * otherwise parse the optional count, falling back to the default
 * when absent or zero/unparseable.
 */
static int record__aio_parse(const struct option *opt,
			     const char *str,
			     int unset)
{
	struct record_opts *opts = (struct record_opts *)opt->value;

	if (unset) {
		opts->nr_cblocks = 0;
	} else {
		if (str)
			opts->nr_cblocks = strtol(str, NULL, 0);
		if (!opts->nr_cblocks)
			opts->nr_cblocks = nr_cblocks_default;
	}

	return 0;
}
#else /* HAVE_AIO_SUPPORT */

/* Stubs used when perf is built without POSIX AIO support. */
static int nr_cblocks_max = 0;

static int record__aio_push(struct record *rec __maybe_unused, struct perf_mmap *map __maybe_unused,
			    off_t *off __maybe_unused)
{
	return -1;
}

static off_t record__aio_get_pos(int trace_fd __maybe_unused)
{
	return -1;
}

static void record__aio_set_pos(int trace_fd __maybe_unused, off_t pos __maybe_unused)
{
}

static void record__aio_mmap_read_sync(struct record *rec __maybe_unused)
{
}
#endif
407
408static int record__aio_enabled(struct record *rec)
409{
410 return rec->opts.nr_cblocks > 0;
411}
412
#define MMAP_FLUSH_DEFAULT 1
/*
 * Option callback for --mmap-flush: accept a size with an optional
 * B/K/M/G suffix (or a bare number), default to MMAP_FLUSH_DEFAULT,
 * and clamp the result to a quarter of the mmap buffer size.
 */
static int record__mmap_flush_parse(const struct option *opt,
				    const char *str,
				    int unset)
{
	int flush_max;
	struct record_opts *opts = (struct record_opts *)opt->value;
	static struct parse_tag tags[] = {
			{ .tag = 'B', .mult = 1 },
			{ .tag = 'K', .mult = 1 << 10 },
			{ .tag = 'M', .mult = 1 << 20 },
			{ .tag = 'G', .mult = 1 << 30 },
			{ .tag = 0 },
	};

	if (unset)
		return 0;

	if (str) {
		opts->mmap_flush = parse_tag_value(str, tags);
		/* no recognized suffix: retry as a plain number */
		if (opts->mmap_flush == (int)-1)
			opts->mmap_flush = strtol(str, NULL, 0);
	}

	if (!opts->mmap_flush)
		opts->mmap_flush = MMAP_FLUSH_DEFAULT;

	flush_max = perf_evlist__mmap_size(opts->mmap_pages);
	flush_max /= 4;
	if (opts->mmap_flush > flush_max)
		opts->mmap_flush = flush_max;

	return 0;
}
447
#ifdef HAVE_ZSTD_SUPPORT
static unsigned int comp_level_default = 1;

/*
 * Option callback for the compression level: --no-... disables
 * compression (level 0), otherwise parse the optional level, falling
 * back to the default when absent or zero/unparseable.
 */
static int record__parse_comp_level(const struct option *opt, const char *str, int unset)
{
	struct record_opts *opts = opt->value;

	if (unset) {
		opts->comp_level = 0;
	} else {
		if (str)
			opts->comp_level = strtol(str, NULL, 0);
		if (!opts->comp_level)
			opts->comp_level = comp_level_default;
	}

	return 0;
}
#endif
/* Upper bound advertised for the compression level option. */
static unsigned int comp_level_max = 22;
468
Alexey Budankov42e1fd82019-03-18 20:41:33 +0300469static int record__comp_enabled(struct record *rec)
470{
471 return rec->opts.comp_level > 0;
472}
473
/*
 * Tool callback used while synthesizing events: write the synthesized
 * event straight to the output file.  @sample and @machine are unused.
 */
static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, NULL, event, event->header.size);
}
482
/*
 * perf_mmap__push() callback for the synchronous (non-aio) path:
 * optionally zstd-compress the chunk into map->data, then write it out.
 * @to is the struct record.
 */
static int record__pushfn(struct perf_mmap *map, void *to, void *bf, size_t size)
{
	struct record *rec = to;

	if (record__comp_enabled(rec)) {
		size = zstd_compress(rec->session, map->data, perf_mmap__mmap_len(map), bf, size);
		bf = map->data;	/* write the compressed copy instead */
	}

	rec->samples++;
	return record__write(rec, map, bf, size);
}
495
/* Flags shared between signal handlers and the main record loop. */
/* NOTE(review): volatile int, not sig_atomic_t — matches existing file style. */
static volatile int done;		/* request the main loop to stop */
static volatile int signr = -1;		/* which signal terminated us, for re-raise */
static volatile int child_finished;	/* SIGCHLD was received */

/*
 * Common handler for termination signals and SIGCHLD: record what
 * happened and ask the main loop to wind down.
 */
static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
}
509
/*
 * SIGSEGV handler: run the perf-hooks recovery path, then dump a stack
 * trace for the crash.
 */
static void sigsegv_handler(int sig)
{
	perf_hooks__recover();
	sighandler_dump_stack(sig);
}
515
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300516static void record__sig_exit(void)
517{
518 if (signr == -1)
519 return;
520
521 signal(signr, SIG_DFL);
522 raise(signr);
523}
524
Adrian Huntere31f0d02015-04-30 17:37:27 +0300525#ifdef HAVE_AUXTRACE_SUPPORT
526
/*
 * Write one AUX area tracing event plus its (possibly split) payload to
 * the output, padding the payload to an 8-byte boundary.  For seekable
 * single-file output, the event's file offset is also recorded in the
 * session's auxtrace index.
 */
static int record__process_auxtrace(struct perf_tool *tool,
				    struct perf_mmap *map,
				    union perf_event *event, void *data1,
				    size_t len1, void *data2, size_t len2)
{
	struct record *rec = container_of(tool, struct record, tool);
	struct perf_data *data = &rec->data;
	size_t padding;
	u8 pad[8] = {0};

	if (!perf_data__is_pipe(data) && !perf_data__is_dir(data)) {
		off_t file_offset;
		int fd = perf_data__fd(data);
		int err;

		file_offset = lseek(fd, 0, SEEK_CUR);
		if (file_offset == -1)
			return -1;
		err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
						     event, file_offset);
		if (err)
			return err;
	}

	/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
	padding = (len1 + len2) & 7;
	if (padding)
		padding = 8 - padding;

	record__write(rec, map, event, event->header.size);
	record__write(rec, map, data1, len1);
	if (len2)
		record__write(rec, map, data2, len2);
	record__write(rec, map, &pad, padding);

	return 0;
}
564
565static int record__auxtrace_mmap_read(struct record *rec,
Jiri Olsae035f4c2018-09-13 14:54:05 +0200566 struct perf_mmap *map)
Adrian Hunteref149c22015-04-09 18:53:45 +0300567{
568 int ret;
569
Jiri Olsae035f4c2018-09-13 14:54:05 +0200570 ret = auxtrace_mmap__read(map, rec->itr, &rec->tool,
Adrian Hunteref149c22015-04-09 18:53:45 +0300571 record__process_auxtrace);
572 if (ret < 0)
573 return ret;
574
575 if (ret)
576 rec->samples++;
577
578 return 0;
579}
580
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300581static int record__auxtrace_mmap_read_snapshot(struct record *rec,
Jiri Olsae035f4c2018-09-13 14:54:05 +0200582 struct perf_mmap *map)
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300583{
584 int ret;
585
Jiri Olsae035f4c2018-09-13 14:54:05 +0200586 ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool,
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300587 record__process_auxtrace,
588 rec->opts.auxtrace_snapshot_size);
589 if (ret < 0)
590 return ret;
591
592 if (ret)
593 rec->samples++;
594
595 return 0;
596}
597
598static int record__auxtrace_read_snapshot_all(struct record *rec)
599{
600 int i;
601 int rc = 0;
602
603 for (i = 0; i < rec->evlist->nr_mmaps; i++) {
Jiri Olsae035f4c2018-09-13 14:54:05 +0200604 struct perf_mmap *map = &rec->evlist->mmap[i];
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300605
Jiri Olsae035f4c2018-09-13 14:54:05 +0200606 if (!map->auxtrace_mmap.base)
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300607 continue;
608
Jiri Olsae035f4c2018-09-13 14:54:05 +0200609 if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300610 rc = -1;
611 goto out;
612 }
613 }
614out:
615 return rc;
616}
617
/*
 * Read an AUX area snapshot from all buffers and finish it via the
 * itr backend, updating the snapshot trigger: error state on any
 * failure, ready state on success.  @on_exit tells the backend this is
 * the final snapshot taken at exit.
 */
static void record__read_auxtrace_snapshot(struct record *rec, bool on_exit)
{
	pr_debug("Recording AUX area tracing snapshot\n");
	if (record__auxtrace_read_snapshot_all(rec) < 0) {
		trigger_error(&auxtrace_snapshot_trigger);
	} else {
		if (auxtrace_record__snapshot_finish(rec->itr, on_exit))
			trigger_error(&auxtrace_snapshot_trigger);
		else
			trigger_ready(&auxtrace_snapshot_trigger);
	}
}
630
/*
 * Take one final AUX snapshot on the way out: start a snapshot if none
 * is pending, then read it.  Does nothing if the trigger is already in
 * the error state.
 *
 * Return: 0 on success, -1 if starting or reading the snapshot failed.
 */
static int record__auxtrace_snapshot_exit(struct record *rec)
{
	if (trigger_is_error(&auxtrace_snapshot_trigger))
		return 0;

	if (!auxtrace_record__snapshot_started &&
	    auxtrace_record__snapshot_start(rec->itr))
		return -1;

	record__read_auxtrace_snapshot(rec, true);
	if (trigger_is_error(&auxtrace_snapshot_trigger))
		return -1;

	return 0;
}
646
/*
 * Lazily create the AUX area tracing backend (rec->itr) and parse the
 * snapshot options and auxtrace filters for the event list.
 *
 * Return: 0 on success, negative error from any of the steps.
 */
static int record__auxtrace_init(struct record *rec)
{
	int err;

	if (!rec->itr) {
		rec->itr = auxtrace_record__init(rec->evlist, &err);
		if (err)
			return err;
	}

	err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
					      rec->opts.auxtrace_snapshot_opts);
	if (err)
		return err;

	return auxtrace_parse_filters(rec->evlist);
}
664
#else

/* Stubs used when perf is built without AUX area tracing support. */

static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
			       struct perf_mmap *map __maybe_unused)
{
	return 0;
}

static inline
void record__read_auxtrace_snapshot(struct record *rec __maybe_unused,
				    bool on_exit __maybe_unused)
{
}

static inline
int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
{
	return 0;
}

static inline
int record__auxtrace_snapshot_exit(struct record *rec __maybe_unused)
{
	return 0;
}

static int record__auxtrace_init(struct record *rec __maybe_unused)
{
	return 0;
}

#endif
698
/*
 * mmap the ring buffers for @evlist according to the record options
 * (mmap pages, auxtrace pages/snapshot mode, aio blocks, affinity,
 * flush threshold and compression level).  Prepares the cpu/node map
 * first when a non-default affinity mode is requested.
 *
 * Return: 0 on success, -errno (or -EINVAL when errno is 0) on failure,
 * with a user-facing diagnostic printed.
 */
static int record__mmap_evlist(struct record *rec,
			       struct evlist *evlist)
{
	struct record_opts *opts = &rec->opts;
	char msg[512];

	if (opts->affinity != PERF_AFFINITY_SYS)
		cpu__setup_cpunode_map();

	if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
				 opts->auxtrace_mmap_pages,
				 opts->auxtrace_snapshot_mode,
				 opts->nr_cblocks, opts->affinity,
				 opts->mmap_flush, opts->comp_level) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u,%u)\n",
			       opts->mmap_pages, opts->auxtrace_mmap_pages);
			return -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno,
				str_error_r(errno, msg, sizeof(msg)));
			if (errno)
				return -errno;
			else
				return -EINVAL;
		}
	}
	return 0;
}
732
733static int record__mmap(struct record *rec)
734{
735 return record__mmap_evlist(rec, rec->evlist);
736}
737
/*
 * Open all events in the session's evlist, with fallback handling:
 * per-event fallbacks (perf_evsel__fallback), weak-group degradation on
 * EINVAL/EBADF, then filters and ring-buffer mmap.  On success the
 * session takes the evlist and its id header size is set.
 *
 * Return: 0 on success, -errno/-ENOMEM/-1 on failure with a diagnostic
 * printed.
 */
static int record__open(struct record *rec)
{
	char msg[BUFSIZ];
	struct evsel *pos;
	struct evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	int rc = 0;

	/*
	 * For initial_delay we need to add a dummy event so that we can track
	 * PERF_RECORD_MMAP while we wait for the initial delay to enable the
	 * real events, the ones asked by the user.
	 */
	if (opts->initial_delay) {
		if (perf_evlist__add_dummy(evlist))
			return -ENOMEM;

		/* the dummy tracks from the start; real events start on exec */
		pos = perf_evlist__first(evlist);
		pos->tracking = 0;
		pos = perf_evlist__last(evlist);
		pos->tracking = 1;
		pos->core.attr.enable_on_exec = 1;
	}

	perf_evlist__config(evlist, opts, &callchain_param);

	evlist__for_each_entry(evlist, pos) {
try_again:
		if (evsel__open(pos, pos->core.cpus, pos->core.threads) < 0) {
			/* try a weaker event configuration first */
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose > 0)
					ui__warning("%s\n", msg);
				goto try_again;
			}
			/* weak group member failed: break the group up and retry */
			if ((errno == EINVAL || errno == EBADF) &&
			    pos->leader != pos &&
			    pos->weak_group) {
				pos = perf_evlist__reset_weak_group(evlist, pos);
				goto try_again;
			}
			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}

		pos->supported = true;
	}

	if (perf_evlist__apply_filters(evlist, &pos)) {
		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
			pos->filter, perf_evsel__name(pos), errno,
			str_error_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	rc = record__mmap(rec);
	if (rc)
		goto out;

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}
806
Namhyung Kime3d59112015-01-29 17:06:44 +0900807static int process_sample_event(struct perf_tool *tool,
808 union perf_event *event,
809 struct perf_sample *sample,
Jiri Olsa32dcd022019-07-21 13:23:51 +0200810 struct evsel *evsel,
Namhyung Kime3d59112015-01-29 17:06:44 +0900811 struct machine *machine)
812{
813 struct record *rec = container_of(tool, struct record, tool);
814
Jin Yao68588ba2017-12-08 21:13:42 +0800815 if (rec->evlist->first_sample_time == 0)
816 rec->evlist->first_sample_time = sample->time;
Namhyung Kime3d59112015-01-29 17:06:44 +0900817
Jin Yao68588ba2017-12-08 21:13:42 +0800818 rec->evlist->last_sample_time = sample->time;
819
820 if (rec->buildid_all)
821 return 0;
822
823 rec->samples++;
Namhyung Kime3d59112015-01-29 17:06:44 +0900824 return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
825}
826
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -0300827static int process_buildids(struct record *rec)
Arnaldo Carvalho de Melo6122e4e2010-02-03 16:52:05 -0200828{
Jiri Olsaf5fc14122013-10-15 16:27:32 +0200829 struct perf_session *session = rec->session;
Arnaldo Carvalho de Melo6122e4e2010-02-03 16:52:05 -0200830
Jiri Olsa45112e82019-02-21 10:41:29 +0100831 if (perf_data__size(&rec->data) == 0)
Arnaldo Carvalho de Melo9f591fd2010-03-11 15:53:11 -0300832 return 0;
833
Namhyung Kim00dc8652014-11-04 10:14:32 +0900834 /*
835 * During this process, it'll load kernel map and replace the
836 * dso->long_name to a real pathname it found. In this case
837 * we prefer the vmlinux path like
838 * /lib/modules/3.16.4/build/vmlinux
839 *
840 * rather than build-id path (in debug directory).
841 * $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
842 */
843 symbol_conf.ignore_vmlinux_buildid = true;
844
Namhyung Kim61566812016-01-11 22:37:09 +0900845 /*
846 * If --buildid-all is given, it marks all DSO regardless of hits,
Jin Yao68588ba2017-12-08 21:13:42 +0800847 * so no need to process samples. But if timestamp_boundary is enabled,
848 * it still needs to walk on all samples to get the timestamps of
849 * first/last samples.
Namhyung Kim61566812016-01-11 22:37:09 +0900850 */
Jin Yao68588ba2017-12-08 21:13:42 +0800851 if (rec->buildid_all && !rec->timestamp_boundary)
Namhyung Kim61566812016-01-11 22:37:09 +0900852 rec->tool.sample = NULL;
853
Arnaldo Carvalho de Melob7b61cb2015-03-03 11:58:45 -0300854 return perf_session__process_events(session);
Arnaldo Carvalho de Melo6122e4e2010-02-03 16:52:05 -0200855}
856
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -0200857static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800858{
859 int err;
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -0200860 struct perf_tool *tool = data;
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800861 /*
862 *As for guest kernel when processing subcommand record&report,
863 *we arrange module mmap prior to guest kernel mmap and trigger
864 *a preload dso because default guest module symbols are loaded
865 *from guest kallsyms instead of /lib/modules/XXX/XXX. This
866 *method is used to avoid symbol missing when the first addr is
867 *in module instead of in guest kernel.
868 */
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -0200869 err = perf_event__synthesize_modules(tool, process_synthesized_event,
Arnaldo Carvalho de Melo743eb862011-11-28 07:56:39 -0200870 machine);
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800871 if (err < 0)
872 pr_err("Couldn't record guest kernel [%d]'s reference"
Arnaldo Carvalho de Melo23346f22010-04-27 21:17:50 -0300873 " relocation symbol.\n", machine->pid);
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800874
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800875 /*
876 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
877 * have no _text sometimes.
878 */
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -0200879 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
Adrian Hunter0ae617b2014-01-29 16:14:40 +0200880 machine);
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800881 if (err < 0)
882 pr_err("Couldn't record guest kernel [%d]'s reference"
Arnaldo Carvalho de Melo23346f22010-04-27 21:17:50 -0300883 " relocation symbol.\n", machine->pid);
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800884}
885
/*
 * Header-only PERF_RECORD_FINISHED_ROUND event.  Written out by
 * record__mmap_read_evlist() after a pass over the mmaps that moved any
 * data (see the "Mark the round finished" note there).
 */
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};
890
Alexey Budankovf13de662019-01-22 20:50:57 +0300891static void record__adjust_affinity(struct record *rec, struct perf_mmap *map)
892{
893 if (rec->opts.affinity != PERF_AFFINITY_SYS &&
894 !CPU_EQUAL(&rec->affinity_mask, &map->affinity_mask)) {
895 CPU_ZERO(&rec->affinity_mask);
896 CPU_OR(&rec->affinity_mask, &rec->affinity_mask, &map->affinity_mask);
897 sched_setaffinity(0, sizeof(rec->affinity_mask), &rec->affinity_mask);
898 }
899}
900
Alexey Budankov5d7f4112019-03-18 20:43:35 +0300901static size_t process_comp_header(void *record, size_t increment)
902{
Jiri Olsa72932372019-08-28 15:57:16 +0200903 struct perf_record_compressed *event = record;
Alexey Budankov5d7f4112019-03-18 20:43:35 +0300904 size_t size = sizeof(*event);
905
906 if (increment) {
907 event->header.size += increment;
908 return increment;
909 }
910
911 event->header.type = PERF_RECORD_COMPRESSED;
912 event->header.size = size;
913
914 return size;
915}
916
917static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
918 void *src, size_t src_size)
919{
920 size_t compressed;
Jiri Olsa72932372019-08-28 15:57:16 +0200921 size_t max_record_size = PERF_SAMPLE_MAX_SIZE - sizeof(struct perf_record_compressed) - 1;
Alexey Budankov5d7f4112019-03-18 20:43:35 +0300922
923 compressed = zstd_compress_stream_to_records(&session->zstd_data, dst, dst_size, src, src_size,
924 max_record_size, process_comp_header);
925
926 session->bytes_transferred += src_size;
927 session->bytes_compressed += compressed;
928
929 return compressed;
930}
931
/*
 * Drain every mmap of @evlist into the output file, either directly
 * (perf_mmap__push) or via AIO (record__aio_push) when --aio is enabled.
 *
 * @overwrite: drain the overwrite (backward) mmaps instead of the normal
 *             ones; only done when the backward ring is in DATA_PENDING
 *             state, and the state is reset to EMPTY afterwards.
 * @synch:     force map->flush to 1 for the duration of each push so that
 *             everything is flushed out, restoring the old value after.
 *
 * Returns 0 on success, -1 on any push/auxtrace-read failure.
 */
static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist,
				    bool overwrite, bool synch)
{
	/* Snapshot to detect below whether this pass wrote anything. */
	u64 bytes_written = rec->bytes_written;
	int i;
	int rc = 0;
	struct perf_mmap *maps;
	int trace_fd = rec->data.file.fd;
	off_t off = 0;

	if (!evlist)
		return 0;

	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
	if (!maps)
		return 0;

	/* Backward ring only has consumable data in DATA_PENDING state. */
	if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
		return 0;

	/* AIO writes track the file position manually, not via the fd offset. */
	if (record__aio_enabled(rec))
		off = record__aio_get_pos(trace_fd);

	for (i = 0; i < evlist->nr_mmaps; i++) {
		u64 flush = 0;
		struct perf_mmap *map = &maps[i];

		if (map->base) {
			record__adjust_affinity(rec, map);
			if (synch) {
				/* Temporarily flush everything; restored below. */
				flush = map->flush;
				map->flush = 1;
			}
			if (!record__aio_enabled(rec)) {
				if (perf_mmap__push(map, rec, record__pushfn) < 0) {
					if (synch)
						map->flush = flush;
					rc = -1;
					goto out;
				}
			} else {
				if (record__aio_push(rec, map, &off) < 0) {
					/* Persist the advanced offset even on failure. */
					record__aio_set_pos(trace_fd, off);
					if (synch)
						map->flush = flush;
					rc = -1;
					goto out;
				}
			}
			if (synch)
				map->flush = flush;
		}

		if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
		    record__auxtrace_mmap_read(rec, map) != 0) {
			rc = -1;
			goto out;
		}
	}

	if (record__aio_enabled(rec))
		record__aio_set_pos(trace_fd, off);

	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, NULL, &finished_round_event, sizeof(finished_round_event));

	if (overwrite)
		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
out:
	return rc;
}
1007
Alexey Budankov470530b2019-03-18 20:40:26 +03001008static int record__mmap_read_all(struct record *rec, bool synch)
Wang Nancb216862016-06-27 10:24:04 +00001009{
1010 int err;
1011
Alexey Budankov470530b2019-03-18 20:40:26 +03001012 err = record__mmap_read_evlist(rec, rec->evlist, false, synch);
Wang Nancb216862016-06-27 10:24:04 +00001013 if (err)
1014 return err;
1015
Alexey Budankov470530b2019-03-18 20:40:26 +03001016 return record__mmap_read_evlist(rec, rec->evlist, true, synch);
Wang Nancb216862016-06-27 10:24:04 +00001017}
1018
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001019static void record__init_features(struct record *rec)
David Ahern57706ab2013-11-06 11:41:34 -07001020{
David Ahern57706ab2013-11-06 11:41:34 -07001021 struct perf_session *session = rec->session;
1022 int feat;
1023
1024 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
1025 perf_header__set_feat(&session->header, feat);
1026
1027 if (rec->no_buildid)
1028 perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
1029
Jiri Olsace9036a2019-07-21 13:24:23 +02001030 if (!have_tracepoints(&rec->evlist->core.entries))
David Ahern57706ab2013-11-06 11:41:34 -07001031 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
1032
1033 if (!rec->opts.branch_stack)
1034 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
Adrian Hunteref149c22015-04-09 18:53:45 +03001035
1036 if (!rec->opts.full_auxtrace)
1037 perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
Jiri Olsaffa517a2015-10-25 15:51:43 +01001038
Alexey Budankovcf790512018-10-09 17:36:24 +03001039 if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
1040 perf_header__clear_feat(&session->header, HEADER_CLOCKID);
1041
Jiri Olsa258031c2019-03-08 14:47:39 +01001042 perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
Alexey Budankov42e1fd82019-03-18 20:41:33 +03001043 if (!record__comp_enabled(rec))
1044 perf_header__clear_feat(&session->header, HEADER_COMPRESSED);
Jiri Olsa258031c2019-03-08 14:47:39 +01001045
Jiri Olsaffa517a2015-10-25 15:51:43 +01001046 perf_header__clear_feat(&session->header, HEADER_STAT);
David Ahern57706ab2013-11-06 11:41:34 -07001047}
1048
Wang Nane1ab48b2016-02-26 09:32:10 +00001049static void
1050record__finish_output(struct record *rec)
1051{
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001052 struct perf_data *data = &rec->data;
1053 int fd = perf_data__fd(data);
Wang Nane1ab48b2016-02-26 09:32:10 +00001054
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001055 if (data->is_pipe)
Wang Nane1ab48b2016-02-26 09:32:10 +00001056 return;
1057
1058 rec->session->header.data_size += rec->bytes_written;
Jiri Olsa45112e82019-02-21 10:41:29 +01001059 data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR);
Wang Nane1ab48b2016-02-26 09:32:10 +00001060
1061 if (!rec->no_buildid) {
1062 process_buildids(rec);
1063
1064 if (rec->buildid_all)
1065 dsos__hit_all(rec->session);
1066 }
1067 perf_session__write_header(rec->session, rec->evlist, fd, true);
1068
1069 return;
1070}
1071
Wang Nan4ea648a2016-07-14 08:34:47 +00001072static int record__synthesize_workload(struct record *rec, bool tail)
Wang Nanbe7b0c92016-04-20 18:59:54 +00001073{
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -03001074 int err;
Jiri Olsa9749b902019-07-21 13:23:50 +02001075 struct perf_thread_map *thread_map;
Wang Nanbe7b0c92016-04-20 18:59:54 +00001076
Wang Nan4ea648a2016-07-14 08:34:47 +00001077 if (rec->opts.tail_synthesize != tail)
1078 return 0;
1079
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -03001080 thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
1081 if (thread_map == NULL)
1082 return -1;
1083
1084 err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
Wang Nanbe7b0c92016-04-20 18:59:54 +00001085 process_synthesized_event,
1086 &rec->session->machines.host,
Mark Drayton3fcb10e2018-12-04 12:34:20 -08001087 rec->opts.sample_address);
Jiri Olsa7836e522019-07-21 13:24:20 +02001088 perf_thread_map__put(thread_map);
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -03001089 return err;
Wang Nanbe7b0c92016-04-20 18:59:54 +00001090}
1091
/* Forward declaration: defined below, needed by record__switch_output(). */
static int record__synthesize(struct record *rec, bool tail);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001093
/*
 * Rotate the output file (--switch-output): finish the current perf.data,
 * rename it with a timestamp suffix, and open a fresh output file.
 *
 * @at_exit: final rotation at process exit — no new tracking events are
 *           synthesized and the written counters are not reset.
 *
 * Returns the fd of the new output file (from perf_data__switch()), or a
 * negative error.
 */
static int
record__switch_output(struct record *rec, bool at_exit)
{
	struct perf_data *data = &rec->data;
	int fd, err;
	char *new_filename;

	/* Same Size: "2015122520103046"*/
	char timestamp[] = "InvalidTimestamp";

	/* Wait for in-flight AIO writes before touching the file. */
	record__aio_mmap_read_sync(rec);

	/* Emit tail-phase synthesized events into the file being closed. */
	record__synthesize(rec, true);
	if (target__none(&rec->opts.target))
		record__synthesize_workload(rec, true);

	rec->samples = 0;
	record__finish_output(rec);
	err = fetch_current_timestamp(timestamp, sizeof(timestamp));
	if (err) {
		pr_err("Failed to get current timestamp\n");
		return -EINVAL;
	}

	/* Renames the old file to <path>.<timestamp> and reopens the output. */
	fd = perf_data__switch(data, timestamp,
				    rec->session->header.data_offset,
				    at_exit, &new_filename);
	if (fd >= 0 && !at_exit) {
		rec->bytes_written = 0;
		rec->session->header.data_size = 0;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
			data->path, timestamp);

	/*
	 * With --switch-output=...:N only the last N files are kept: reuse
	 * the slot ring, deleting the file that falls off the end.
	 */
	if (rec->switch_output.num_files) {
		int n = rec->switch_output.cur_file + 1;

		if (n >= rec->switch_output.num_files)
			n = 0;
		rec->switch_output.cur_file = n;
		if (rec->switch_output.filenames[n]) {
			remove(rec->switch_output.filenames[n]);
			zfree(&rec->switch_output.filenames[n]);
		}
		rec->switch_output.filenames[n] = new_filename;
	} else {
		free(new_filename);
	}

	/* Output tracking events */
	if (!at_exit) {
		record__synthesize(rec, false);

		/*
		 * In 'perf record --switch-output' without -a,
		 * record__synthesize() in record__switch_output() won't
		 * generate tracking events because there's no thread_map
		 * in evlist. Which causes newly created perf.data doesn't
		 * contain map and comm information.
		 * Create a fake thread_map and directly call
		 * perf_event__synthesize_thread_map() for those events.
		 */
		if (target__none(&rec->opts.target))
			record__synthesize_workload(rec, false);
	}
	return fd;
}
1163
/* Set from the SIGUSR1 handler below; presumably the errno of the failed
 * exec in the forked workload — see perf_evlist__prepare_workload(). */
static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	/* Tell the main record loop to stop and that the child is gone. */
	done = 1;
	child_finished = 1;
}
1179
/* Signal handlers installed by __cmd_record(); defined later in this file. */
static void snapshot_sig_handler(int sig);
static void alarm_sig_handler(int sig);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001182
/*
 * Weak default for synthesizing a time-conversion event: does nothing.
 * Architectures that support it override this symbol elsewhere.
 */
int __weak
perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
			    struct perf_tool *tool __maybe_unused,
			    perf_event__handler_t process __maybe_unused,
			    struct machine *machine __maybe_unused)
{
	return 0;
}
1191
Wang Nanee667f92016-06-27 10:24:05 +00001192static const struct perf_event_mmap_page *
Jiri Olsa63503db2019-07-21 13:23:52 +02001193perf_evlist__pick_pc(struct evlist *evlist)
Wang Nanee667f92016-06-27 10:24:05 +00001194{
Wang Nanb2cb6152016-07-14 08:34:39 +00001195 if (evlist) {
1196 if (evlist->mmap && evlist->mmap[0].base)
1197 return evlist->mmap[0].base;
Wang Nan0b72d692017-12-04 16:51:07 +00001198 if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].base)
1199 return evlist->overwrite_mmap[0].base;
Wang Nanb2cb6152016-07-14 08:34:39 +00001200 }
Wang Nanee667f92016-06-27 10:24:05 +00001201 return NULL;
1202}
1203
Wang Nanc45628b2016-05-24 02:28:59 +00001204static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
1205{
Wang Nanee667f92016-06-27 10:24:05 +00001206 const struct perf_event_mmap_page *pc;
1207
1208 pc = perf_evlist__pick_pc(rec->evlist);
1209 if (pc)
1210 return pc;
Wang Nanc45628b2016-05-24 02:28:59 +00001211 return NULL;
1212}
1213
/*
 * Synthesize the non-sample events that describe the system state at
 * record time: attrs/features/tracing data (pipe mode only), time
 * conversion, auxtrace info, kernel and module mmaps, guest machines,
 * extra attrs, thread and cpu maps, bpf events, and existing threads.
 *
 * @tail: only run when it matches the configured tail_synthesize phase
 *        (events can be emitted at the start or at the end of the record).
 *
 * Returns 0 on success or a negative error; some failures only warn.
 */
static int record__synthesize(struct record *rec, bool tail)
{
	struct perf_session *session = rec->session;
	struct machine *machine = &session->machines.host;
	struct perf_data *data = &rec->data;
	struct record_opts *opts = &rec->opts;
	struct perf_tool *tool = &rec->tool;
	int fd = perf_data__fd(data);
	int err = 0;

	if (rec->opts.tail_synthesize != tail)
		return 0;

	if (data->is_pipe) {
		/*
		 * We need to synthesize events first, because some
		 * features works on top of them (on report side).
		 */
		err = perf_event__synthesize_attrs(tool, rec->evlist,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out;
		}

		err = perf_event__synthesize_features(tool, session, rec->evlist,
						      process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize features.\n");
			return err;
		}

		if (have_tracepoints(&rec->evlist->core.entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so its not really
			 * an error, just that we don't need to
			 * synthesize anything.  We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out;
			}
			/* err is the number of tracing-data bytes written. */
			rec->bytes_written += err;
		}
	}

	err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
					  process_synthesized_event, machine);
	if (err)
		goto out;

	if (rec->opts.full_auxtrace) {
		err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
					session, process_synthesized_event);
		if (err)
			goto out;
	}

	if (!perf_evlist__exclude_kernel(rec->evlist)) {
		/* Kernel mmap/module failures only warn; recording continues. */
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine);
		WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
				   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
				   "Check /proc/kallsyms permission or run as root.\n");

		err = perf_event__synthesize_modules(tool, process_synthesized_event,
						     machine);
		WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
				   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
				   "Check /proc/modules permission or run as root.\n");
	}

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	err = perf_event__synthesize_extra_attr(&rec->tool,
						rec->evlist,
						process_synthesized_event,
						data->is_pipe);
	if (err)
		goto out;

	err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->core.threads,
						 process_synthesized_event,
						NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize thread map.\n");
		return err;
	}

	err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.cpus,
					     process_synthesized_event, NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize cpu map.\n");
		return err;
	}

	err = perf_event__synthesize_bpf_events(session, process_synthesized_event,
						machine, opts);
	if (err < 0)
		pr_warning("Couldn't synthesize bpf events.\n");

	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->core.threads,
					    process_synthesized_event, opts->sample_address,
					    1);
out:
	return err;
}
1329
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001330static int __cmd_record(struct record *rec, int argc, const char **argv)
Peter Zijlstra16c8a102009-05-05 17:50:27 +02001331{
David Ahern57706ab2013-11-06 11:41:34 -07001332 int err;
Namhyung Kim45604712014-05-12 09:47:24 +09001333 int status = 0;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001334 unsigned long waking = 0;
Zhang, Yanmin46be6042010-03-18 11:36:04 -03001335 const bool forks = argc > 0;
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02001336 struct perf_tool *tool = &rec->tool;
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03001337 struct record_opts *opts = &rec->opts;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001338 struct perf_data *data = &rec->data;
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001339 struct perf_session *session;
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001340 bool disabled = false, draining = false;
Jiri Olsa63503db2019-07-21 13:23:52 +02001341 struct evlist *sb_evlist = NULL;
Namhyung Kim42aa2762015-01-29 17:06:48 +09001342 int fd;
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001343 float ratio = 0;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001344
Namhyung Kim45604712014-05-12 09:47:24 +09001345 atexit(record__sig_exit);
Peter Zijlstraf5970552009-06-18 23:22:55 +02001346 signal(SIGCHLD, sig_handler);
1347 signal(SIGINT, sig_handler);
David Ahern804f7ac2013-05-06 12:24:23 -06001348 signal(SIGTERM, sig_handler);
Wang Nana0748652016-11-26 07:03:28 +00001349 signal(SIGSEGV, sigsegv_handler);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00001350
Hari Bathinif3b36142017-03-08 02:11:43 +05301351 if (rec->opts.record_namespaces)
1352 tool->namespace_events = true;
1353
Jiri Olsadc0c6122017-01-09 10:51:58 +01001354 if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001355 signal(SIGUSR2, snapshot_sig_handler);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001356 if (rec->opts.auxtrace_snapshot_mode)
1357 trigger_on(&auxtrace_snapshot_trigger);
Jiri Olsadc0c6122017-01-09 10:51:58 +01001358 if (rec->switch_output.enabled)
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001359 trigger_on(&switch_output_trigger);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00001360 } else {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001361 signal(SIGUSR2, SIG_IGN);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00001362 }
Peter Zijlstraf5970552009-06-18 23:22:55 +02001363
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001364 session = perf_session__new(data, false, tool);
Arnaldo Carvalho de Melo94c744b2009-12-11 21:24:02 -02001365 if (session == NULL) {
Adrien BAKffa91882014-04-18 11:00:43 +09001366 pr_err("Perf session creation failed.\n");
Arnaldo Carvalho de Meloa9a70bb2009-11-17 01:18:11 -02001367 return -1;
1368 }
1369
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001370 fd = perf_data__fd(data);
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001371 rec->session = session;
1372
Alexey Budankov5d7f4112019-03-18 20:43:35 +03001373 if (zstd_init(&session->zstd_data, rec->opts.comp_level) < 0) {
1374 pr_err("Compression initialization failed.\n");
1375 return -1;
1376 }
1377
1378 session->header.env.comp_type = PERF_COMP_ZSTD;
1379 session->header.env.comp_level = rec->opts.comp_level;
1380
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001381 record__init_features(rec);
Stephane Eranian330aa672012-03-08 23:47:46 +01001382
Alexey Budankovcf790512018-10-09 17:36:24 +03001383 if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
1384 session->header.env.clockid_res_ns = rec->opts.clockid_res_ns;
1385
Arnaldo Carvalho de Melod4db3f12009-12-27 21:36:57 -02001386 if (forks) {
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001387 err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001388 argv, data->is_pipe,
Arnaldo Carvalho de Melo735f7e02014-01-03 14:56:49 -03001389 workload_exec_failed_signal);
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001390 if (err < 0) {
1391 pr_err("Couldn't run the workload!\n");
Namhyung Kim45604712014-05-12 09:47:24 +09001392 status = err;
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001393 goto out_delete_session;
Jens Axboe0a5ac842009-08-12 11:18:01 +02001394 }
Peter Zijlstra856e9662009-12-16 17:55:55 +01001395 }
1396
Jiri Olsaad46e48c2018-03-02 17:13:54 +01001397 /*
1398 * If we have just single event and are sending data
1399 * through pipe, we need to force the ids allocation,
1400 * because we synthesize event name through the pipe
1401 * and need the id for that.
1402 */
Jiri Olsa6484d2f2019-07-21 13:24:28 +02001403 if (data->is_pipe && rec->evlist->core.nr_entries == 1)
Jiri Olsaad46e48c2018-03-02 17:13:54 +01001404 rec->opts.sample_id = true;
1405
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001406 if (record__open(rec) != 0) {
David Ahern8d3eca22012-08-26 12:24:47 -06001407 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001408 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001409 }
Alexey Budankov42e1fd82019-03-18 20:41:33 +03001410 session->header.env.comp_mmap_len = session->evlist->mmap_len;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001411
Wang Nan8690a2a2016-02-22 09:10:32 +00001412 err = bpf__apply_obj_config();
1413 if (err) {
1414 char errbuf[BUFSIZ];
1415
1416 bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
1417 pr_err("ERROR: Apply config to BPF failed: %s\n",
1418 errbuf);
1419 goto out_child;
1420 }
1421
Adrian Huntercca84822015-08-19 17:29:21 +03001422 /*
1423 * Normally perf_session__new would do this, but it doesn't have the
1424 * evlist.
1425 */
1426 if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
1427 pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
1428 rec->tool.ordered_events = false;
1429 }
1430
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001431 if (!rec->evlist->nr_groups)
Namhyung Kima8bb5592013-01-22 18:09:31 +09001432 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
1433
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001434 if (data->is_pipe) {
Namhyung Kim42aa2762015-01-29 17:06:48 +09001435 err = perf_header__write_pipe(fd);
Tom Zanussi529870e2010-04-01 23:59:16 -05001436 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001437 goto out_child;
Jiri Olsa563aecb2013-06-05 13:35:06 +02001438 } else {
Namhyung Kim42aa2762015-01-29 17:06:48 +09001439 err = perf_session__write_header(session, rec->evlist, fd, false);
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02001440 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001441 goto out_child;
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02001442 }
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02001443
David Ahernd3665492012-02-06 15:27:52 -07001444 if (!rec->no_buildid
Robert Richtere20960c2011-12-07 10:02:55 +01001445 && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
David Ahernd3665492012-02-06 15:27:52 -07001446 pr_err("Couldn't generate buildids. "
Robert Richtere20960c2011-12-07 10:02:55 +01001447 "Use --no-buildid to profile anyway.\n");
David Ahern8d3eca22012-08-26 12:24:47 -06001448 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001449 goto out_child;
Robert Richtere20960c2011-12-07 10:02:55 +01001450 }
1451
Song Liud56354d2019-03-11 22:30:51 -07001452 if (!opts->no_bpf_event)
1453 bpf_event__add_sb_event(&sb_evlist, &session->header.env);
1454
Song Liu657ee552019-03-11 22:30:50 -07001455 if (perf_evlist__start_sb_thread(sb_evlist, &rec->opts.target)) {
1456 pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
1457 opts->no_bpf_event = true;
1458 }
1459
Wang Nan4ea648a2016-07-14 08:34:47 +00001460 err = record__synthesize(rec, false);
Wang Nanc45c86e2016-02-26 09:32:07 +00001461 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001462 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001463
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001464 if (rec->realtime_prio) {
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001465 struct sched_param param;
1466
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001467 param.sched_priority = rec->realtime_prio;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001468 if (sched_setscheduler(0, SCHED_FIFO, &param)) {
Arnaldo Carvalho de Melo6beba7a2009-10-21 17:34:06 -02001469 pr_err("Could not set realtime priority.\n");
David Ahern8d3eca22012-08-26 12:24:47 -06001470 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001471 goto out_child;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001472 }
1473 }
1474
Jiri Olsa774cb492012-11-12 18:34:01 +01001475 /*
1476 * When perf is starting the traced process, all the events
1477 * (apart from group members) have enable_on_exec=1 set,
1478 * so don't spoil it by prematurely enabling them.
1479 */
Andi Kleen6619a532014-01-11 13:38:27 -08001480 if (!target__none(&opts->target) && !opts->initial_delay)
Jiri Olsa1c87f162019-07-21 13:24:08 +02001481 evlist__enable(rec->evlist);
David Ahern764e16a32011-08-25 10:17:55 -06001482
Peter Zijlstra856e9662009-12-16 17:55:55 +01001483 /*
1484 * Let the child rip
1485 */
Namhyung Kime803cf92015-09-22 09:24:55 +09001486 if (forks) {
Jiri Olsa20a8a3c2018-03-07 16:50:04 +01001487 struct machine *machine = &session->machines.host;
Namhyung Kime5bed562015-09-30 10:45:24 +09001488 union perf_event *event;
Hari Bathinie907caf2017-03-08 02:11:51 +05301489 pid_t tgid;
Namhyung Kime5bed562015-09-30 10:45:24 +09001490
1491 event = malloc(sizeof(event->comm) + machine->id_hdr_size);
1492 if (event == NULL) {
1493 err = -ENOMEM;
1494 goto out_child;
1495 }
1496
Namhyung Kime803cf92015-09-22 09:24:55 +09001497 /*
1498 * Some H/W events are generated before COMM event
1499 * which is emitted during exec(), so perf script
1500 * cannot see a correct process name for those events.
1501 * Synthesize COMM event to prevent it.
1502 */
Hari Bathinie907caf2017-03-08 02:11:51 +05301503 tgid = perf_event__synthesize_comm(tool, event,
1504 rec->evlist->workload.pid,
1505 process_synthesized_event,
1506 machine);
1507 free(event);
1508
1509 if (tgid == -1)
1510 goto out_child;
1511
1512 event = malloc(sizeof(event->namespaces) +
1513 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
1514 machine->id_hdr_size);
1515 if (event == NULL) {
1516 err = -ENOMEM;
1517 goto out_child;
1518 }
1519
1520 /*
1521 * Synthesize NAMESPACES event for the command specified.
1522 */
1523 perf_event__synthesize_namespaces(tool, event,
1524 rec->evlist->workload.pid,
1525 tgid, process_synthesized_event,
1526 machine);
Namhyung Kime5bed562015-09-30 10:45:24 +09001527 free(event);
Namhyung Kime803cf92015-09-22 09:24:55 +09001528
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001529 perf_evlist__start_workload(rec->evlist);
Namhyung Kime803cf92015-09-22 09:24:55 +09001530 }
Peter Zijlstra856e9662009-12-16 17:55:55 +01001531
Andi Kleen6619a532014-01-11 13:38:27 -08001532 if (opts->initial_delay) {
Arnaldo Carvalho de Melo0693e682016-08-08 15:05:46 -03001533 usleep(opts->initial_delay * USEC_PER_MSEC);
Jiri Olsa1c87f162019-07-21 13:24:08 +02001534 evlist__enable(rec->evlist);
Andi Kleen6619a532014-01-11 13:38:27 -08001535 }
1536
Wang Nan5f9cf592016-04-20 18:59:49 +00001537 trigger_ready(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001538 trigger_ready(&switch_output_trigger);
Wang Nana0748652016-11-26 07:03:28 +00001539 perf_hooks__invoke_record_start();
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001540 for (;;) {
Yang Shi9f065192015-09-29 14:49:43 -07001541 unsigned long long hits = rec->samples;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001542
Wang Nan057374642016-07-14 08:34:43 +00001543 /*
1544 * rec->evlist->bkw_mmap_state is possible to be
1545 * BKW_MMAP_EMPTY here: when done == true and
1546 * hits != rec->samples in previous round.
1547 *
1548 * perf_evlist__toggle_bkw_mmap ensure we never
1549 * convert BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
1550 */
1551 if (trigger_is_hit(&switch_output_trigger) || done || draining)
1552 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);
1553
Alexey Budankov470530b2019-03-18 20:40:26 +03001554 if (record__mmap_read_all(rec, false) < 0) {
Wang Nan5f9cf592016-04-20 18:59:49 +00001555 trigger_error(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001556 trigger_error(&switch_output_trigger);
David Ahern8d3eca22012-08-26 12:24:47 -06001557 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001558 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001559 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001560
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001561 if (auxtrace_record__snapshot_started) {
1562 auxtrace_record__snapshot_started = 0;
Wang Nan5f9cf592016-04-20 18:59:49 +00001563 if (!trigger_is_error(&auxtrace_snapshot_trigger))
Alexander Shishkince7b0e42019-08-06 17:41:01 +03001564 record__read_auxtrace_snapshot(rec, false);
Wang Nan5f9cf592016-04-20 18:59:49 +00001565 if (trigger_is_error(&auxtrace_snapshot_trigger)) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001566 pr_err("AUX area tracing snapshot failed\n");
1567 err = -1;
1568 goto out_child;
1569 }
1570 }
1571
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001572 if (trigger_is_hit(&switch_output_trigger)) {
Wang Nan057374642016-07-14 08:34:43 +00001573 /*
1574 * If switch_output_trigger is hit, the data in
1575 * overwritable ring buffer should have been collected,
1576 * so bkw_mmap_state should be set to BKW_MMAP_EMPTY.
1577 *
1578 * If SIGUSR2 raise after or during record__mmap_read_all(),
1579 * record__mmap_read_all() didn't collect data from
1580 * overwritable ring buffer. Read again.
1581 */
1582 if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
1583 continue;
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001584 trigger_ready(&switch_output_trigger);
1585
Wang Nan057374642016-07-14 08:34:43 +00001586 /*
1587 * Reenable events in overwrite ring buffer after
1588 * record__mmap_read_all(): we should have collected
1589 * data from it.
1590 */
1591 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);
1592
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001593 if (!quiet)
1594 fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
1595 waking);
1596 waking = 0;
1597 fd = record__switch_output(rec, false);
1598 if (fd < 0) {
1599 pr_err("Failed to switch to new file\n");
1600 trigger_error(&switch_output_trigger);
1601 err = fd;
1602 goto out_child;
1603 }
Jiri Olsabfacbe32017-01-09 10:52:00 +01001604
1605 /* re-arm the alarm */
1606 if (rec->switch_output.time)
1607 alarm(rec->switch_output.time);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001608 }
1609
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001610 if (hits == rec->samples) {
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001611 if (done || draining)
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001612 break;
Arnaldo Carvalho de Melof66a8892014-08-18 17:25:59 -03001613 err = perf_evlist__poll(rec->evlist, -1);
Jiri Olsaa5151142014-06-02 13:44:23 -04001614 /*
1615 * Propagate error, only if there's any. Ignore positive
1616 * number of returned events and interrupt error.
1617 */
1618 if (err > 0 || (err < 0 && errno == EINTR))
Namhyung Kim45604712014-05-12 09:47:24 +09001619 err = 0;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001620 waking++;
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001621
1622 if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
1623 draining = true;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001624 }
1625
Jiri Olsa774cb492012-11-12 18:34:01 +01001626 /*
1627 * When perf is starting the traced process, at the end events
1628 * die with the process and we wait for that. Thus no need to
1629 * disable events in this case.
1630 */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001631 if (done && !disabled && !target__none(&opts->target)) {
Wang Nan5f9cf592016-04-20 18:59:49 +00001632 trigger_off(&auxtrace_snapshot_trigger);
Jiri Olsae74676d2019-07-21 13:24:09 +02001633 evlist__disable(rec->evlist);
Jiri Olsa27119262012-11-12 18:34:02 +01001634 disabled = true;
1635 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001636 }
Alexander Shishkince7b0e42019-08-06 17:41:01 +03001637
Wang Nan5f9cf592016-04-20 18:59:49 +00001638 trigger_off(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001639 trigger_off(&switch_output_trigger);
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001640
Alexander Shishkince7b0e42019-08-06 17:41:01 +03001641 if (opts->auxtrace_snapshot_on_exit)
1642 record__auxtrace_snapshot_exit(rec);
1643
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001644 if (forks && workload_exec_errno) {
Masami Hiramatsu35550da2014-08-14 02:22:43 +00001645 char msg[STRERR_BUFSIZE];
Arnaldo Carvalho de Meloc8b5f2c2016-07-06 11:56:20 -03001646 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001647 pr_err("Workload failed: %s\n", emsg);
1648 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001649 goto out_child;
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001650 }
1651
Namhyung Kime3d59112015-01-29 17:06:44 +09001652 if (!quiet)
Namhyung Kim45604712014-05-12 09:47:24 +09001653 fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02001654
Wang Nan4ea648a2016-07-14 08:34:47 +00001655 if (target__none(&rec->opts.target))
1656 record__synthesize_workload(rec, true);
1657
Namhyung Kim45604712014-05-12 09:47:24 +09001658out_child:
Alexey Budankov470530b2019-03-18 20:40:26 +03001659 record__mmap_read_all(rec, true);
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001660 record__aio_mmap_read_sync(rec);
1661
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001662 if (rec->session->bytes_transferred && rec->session->bytes_compressed) {
1663 ratio = (float)rec->session->bytes_transferred/(float)rec->session->bytes_compressed;
1664 session->header.env.comp_ratio = ratio + 0.5;
1665 }
1666
Namhyung Kim45604712014-05-12 09:47:24 +09001667 if (forks) {
1668 int exit_status;
Ingo Molnaraddc2782009-06-02 23:43:11 +02001669
Namhyung Kim45604712014-05-12 09:47:24 +09001670 if (!child_finished)
1671 kill(rec->evlist->workload.pid, SIGTERM);
1672
1673 wait(&exit_status);
1674
1675 if (err < 0)
1676 status = err;
1677 else if (WIFEXITED(exit_status))
1678 status = WEXITSTATUS(exit_status);
1679 else if (WIFSIGNALED(exit_status))
1680 signr = WTERMSIG(exit_status);
1681 } else
1682 status = err;
1683
Wang Nan4ea648a2016-07-14 08:34:47 +00001684 record__synthesize(rec, true);
Namhyung Kime3d59112015-01-29 17:06:44 +09001685 /* this will be recalculated during process_buildids() */
1686 rec->samples = 0;
1687
Wang Nanecfd7a92016-04-13 08:21:07 +00001688 if (!err) {
1689 if (!rec->timestamp_filename) {
1690 record__finish_output(rec);
1691 } else {
1692 fd = record__switch_output(rec, true);
1693 if (fd < 0) {
1694 status = fd;
1695 goto out_delete_session;
1696 }
1697 }
1698 }
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001699
Wang Nana0748652016-11-26 07:03:28 +00001700 perf_hooks__invoke_record_end();
1701
Namhyung Kime3d59112015-01-29 17:06:44 +09001702 if (!err && !quiet) {
1703 char samples[128];
Wang Nanecfd7a92016-04-13 08:21:07 +00001704 const char *postfix = rec->timestamp_filename ?
1705 ".<timestamp>" : "";
Namhyung Kime3d59112015-01-29 17:06:44 +09001706
Adrian Hunteref149c22015-04-09 18:53:45 +03001707 if (rec->samples && !rec->opts.full_auxtrace)
Namhyung Kime3d59112015-01-29 17:06:44 +09001708 scnprintf(samples, sizeof(samples),
1709 " (%" PRIu64 " samples)", rec->samples);
1710 else
1711 samples[0] = '\0';
1712
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001713 fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s",
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001714 perf_data__size(data) / 1024.0 / 1024.0,
Jiri Olsa2d4f2792019-02-21 10:41:30 +01001715 data->path, postfix, samples);
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001716 if (ratio) {
1717 fprintf(stderr, ", compressed (original %.3f MB, ratio is %.3f)",
1718 rec->session->bytes_transferred / 1024.0 / 1024.0,
1719 ratio);
1720 }
1721 fprintf(stderr, " ]\n");
Namhyung Kime3d59112015-01-29 17:06:44 +09001722 }
1723
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001724out_delete_session:
Alexey Budankov5d7f4112019-03-18 20:43:35 +03001725 zstd_fini(&session->zstd_data);
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001726 perf_session__delete(session);
Song Liu657ee552019-03-11 22:30:50 -07001727
1728 if (!opts->no_bpf_event)
1729 perf_evlist__stop_sb_thread(sb_evlist);
Namhyung Kim45604712014-05-12 09:47:24 +09001730 return status;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001731}
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001732
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001733static void callchain_debug(struct callchain_param *callchain)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001734{
Kan Liangaad2b212015-01-05 13:23:04 -05001735 static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
Jiri Olsaa601fdf2014-02-03 12:44:43 +01001736
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001737 pr_debug("callchain: type %s\n", str[callchain->record_mode]);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001738
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001739 if (callchain->record_mode == CALLCHAIN_DWARF)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001740 pr_debug("callchain: stack dump size %d\n",
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001741 callchain->dump_size);
1742}
1743
1744int record_opts__parse_callchain(struct record_opts *record,
1745 struct callchain_param *callchain,
1746 const char *arg, bool unset)
1747{
1748 int ret;
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001749 callchain->enabled = !unset;
1750
1751 /* --no-call-graph */
1752 if (unset) {
1753 callchain->record_mode = CALLCHAIN_NONE;
1754 pr_debug("callchain: disabled\n");
1755 return 0;
1756 }
1757
1758 ret = parse_callchain_record_opt(arg, callchain);
1759 if (!ret) {
1760 /* Enable data address sampling for DWARF unwind. */
1761 if (callchain->record_mode == CALLCHAIN_DWARF)
1762 record->sample_address = true;
1763 callchain_debug(callchain);
1764 }
1765
1766 return ret;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001767}
1768
Kan Liangc421e802015-07-29 05:42:12 -04001769int record_parse_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001770 const char *arg,
1771 int unset)
1772{
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001773 return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
Jiri Olsa26d33022012-08-07 15:20:47 +02001774}
1775
Kan Liangc421e802015-07-29 05:42:12 -04001776int record_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001777 const char *arg __maybe_unused,
1778 int unset __maybe_unused)
1779{
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001780 struct callchain_param *callchain = opt->value;
Kan Liangc421e802015-07-29 05:42:12 -04001781
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001782 callchain->enabled = true;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001783
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001784 if (callchain->record_mode == CALLCHAIN_NONE)
1785 callchain->record_mode = CALLCHAIN_FP;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001786
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001787 callchain_debug(callchain);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001788 return 0;
1789}
1790
Jiri Olsaeb853e82014-02-03 12:44:42 +01001791static int perf_record_config(const char *var, const char *value, void *cb)
1792{
Namhyung Kim7a29c082015-12-15 10:49:56 +09001793 struct record *rec = cb;
1794
1795 if (!strcmp(var, "record.build-id")) {
1796 if (!strcmp(value, "cache"))
1797 rec->no_buildid_cache = false;
1798 else if (!strcmp(value, "no-cache"))
1799 rec->no_buildid_cache = true;
1800 else if (!strcmp(value, "skip"))
1801 rec->no_buildid = true;
1802 else
1803 return -1;
1804 return 0;
1805 }
Yisheng Xiecff17202018-03-12 19:25:57 +08001806 if (!strcmp(var, "record.call-graph")) {
1807 var = "call-graph.record-mode";
1808 return perf_default_config(var, value, cb);
1809 }
Alexey Budankov93f20c02018-11-06 12:07:19 +03001810#ifdef HAVE_AIO_SUPPORT
1811 if (!strcmp(var, "record.aio")) {
1812 rec->opts.nr_cblocks = strtol(value, NULL, 0);
1813 if (!rec->opts.nr_cblocks)
1814 rec->opts.nr_cblocks = nr_cblocks_default;
1815 }
1816#endif
Jiri Olsaeb853e82014-02-03 12:44:42 +01001817
Yisheng Xiecff17202018-03-12 19:25:57 +08001818 return 0;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001819}
1820
/* One entry of the --clockid name -> numeric clockid translation table. */
struct clockid_map {
	const char *name;
	int clockid;
};

#define CLOCKID_MAP(n, c)	\
	{ .name = n, .clockid = (c), }

/* Sentinel: table is terminated by a NULL name. */
#define CLOCKID_END	{ .name = NULL, }


/*
 * Add the missing ones, we need to build on many distros...
 */
#ifndef CLOCK_MONOTONIC_RAW
#define CLOCK_MONOTONIC_RAW 4
#endif
#ifndef CLOCK_BOOTTIME
#define CLOCK_BOOTTIME 7
#endif
#ifndef CLOCK_TAI
#define CLOCK_TAI 11
#endif

/* Accepted --clockid names; matched case-insensitively by parse_clockid(). */
static const struct clockid_map clockids[] = {
	/* available for all events, NMI safe */
	CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
	CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),

	/* available for some events */
	CLOCKID_MAP("realtime", CLOCK_REALTIME),
	CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
	CLOCKID_MAP("tai", CLOCK_TAI),

	/* available for the lazy */
	CLOCKID_MAP("mono", CLOCK_MONOTONIC),
	CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
	CLOCKID_MAP("real", CLOCK_REALTIME),
	CLOCKID_MAP("boot", CLOCK_BOOTTIME),

	CLOCKID_END,
};
1863
Alexey Budankovcf790512018-10-09 17:36:24 +03001864static int get_clockid_res(clockid_t clk_id, u64 *res_ns)
1865{
1866 struct timespec res;
1867
1868 *res_ns = 0;
1869 if (!clock_getres(clk_id, &res))
1870 *res_ns = res.tv_nsec + res.tv_sec * NSEC_PER_SEC;
1871 else
1872 pr_warning("WARNING: Failed to determine specified clock resolution.\n");
1873
1874 return 0;
1875}
1876
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001877static int parse_clockid(const struct option *opt, const char *str, int unset)
1878{
1879 struct record_opts *opts = (struct record_opts *)opt->value;
1880 const struct clockid_map *cm;
1881 const char *ostr = str;
1882
1883 if (unset) {
1884 opts->use_clockid = 0;
1885 return 0;
1886 }
1887
1888 /* no arg passed */
1889 if (!str)
1890 return 0;
1891
1892 /* no setting it twice */
1893 if (opts->use_clockid)
1894 return -1;
1895
1896 opts->use_clockid = true;
1897
1898 /* if its a number, we're done */
1899 if (sscanf(str, "%d", &opts->clockid) == 1)
Alexey Budankovcf790512018-10-09 17:36:24 +03001900 return get_clockid_res(opts->clockid, &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001901
1902 /* allow a "CLOCK_" prefix to the name */
1903 if (!strncasecmp(str, "CLOCK_", 6))
1904 str += 6;
1905
1906 for (cm = clockids; cm->name; cm++) {
1907 if (!strcasecmp(str, cm->name)) {
1908 opts->clockid = cm->clockid;
Alexey Budankovcf790512018-10-09 17:36:24 +03001909 return get_clockid_res(opts->clockid,
1910 &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001911 }
1912 }
1913
1914 opts->use_clockid = false;
1915 ui__warning("unknown clockid %s, check man page\n", ostr);
1916 return -1;
1917}
1918
Alexey Budankovf4fe11b2019-01-22 20:52:03 +03001919static int record__parse_affinity(const struct option *opt, const char *str, int unset)
1920{
1921 struct record_opts *opts = (struct record_opts *)opt->value;
1922
1923 if (unset || !str)
1924 return 0;
1925
1926 if (!strcasecmp(str, "node"))
1927 opts->affinity = PERF_AFFINITY_NODE;
1928 else if (!strcasecmp(str, "cpu"))
1929 opts->affinity = PERF_AFFINITY_CPU;
1930
1931 return 0;
1932}
1933
Adrian Huntere9db1312015-04-09 18:53:46 +03001934static int record__parse_mmap_pages(const struct option *opt,
1935 const char *str,
1936 int unset __maybe_unused)
1937{
1938 struct record_opts *opts = opt->value;
1939 char *s, *p;
1940 unsigned int mmap_pages;
1941 int ret;
1942
1943 if (!str)
1944 return -EINVAL;
1945
1946 s = strdup(str);
1947 if (!s)
1948 return -ENOMEM;
1949
1950 p = strchr(s, ',');
1951 if (p)
1952 *p = '\0';
1953
1954 if (*s) {
1955 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
1956 if (ret)
1957 goto out_free;
1958 opts->mmap_pages = mmap_pages;
1959 }
1960
1961 if (!p) {
1962 ret = 0;
1963 goto out_free;
1964 }
1965
1966 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
1967 if (ret)
1968 goto out_free;
1969
1970 opts->auxtrace_mmap_pages = mmap_pages;
1971
1972out_free:
1973 free(s);
1974 return ret;
1975}
1976
Jiri Olsa0c582442017-01-09 10:51:59 +01001977static void switch_output_size_warn(struct record *rec)
1978{
1979 u64 wakeup_size = perf_evlist__mmap_size(rec->opts.mmap_pages);
1980 struct switch_output *s = &rec->switch_output;
1981
1982 wakeup_size /= 2;
1983
1984 if (s->size < wakeup_size) {
1985 char buf[100];
1986
1987 unit_number__scnprintf(buf, sizeof(buf), wakeup_size);
1988 pr_warning("WARNING: switch-output data size lower than "
1989 "wakeup kernel buffer size (%s) "
1990 "expect bigger perf.data sizes\n", buf);
1991 }
1992}
1993
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001994static int switch_output_setup(struct record *rec)
1995{
1996 struct switch_output *s = &rec->switch_output;
Jiri Olsadc0c6122017-01-09 10:51:58 +01001997 static struct parse_tag tags_size[] = {
1998 { .tag = 'B', .mult = 1 },
1999 { .tag = 'K', .mult = 1 << 10 },
2000 { .tag = 'M', .mult = 1 << 20 },
2001 { .tag = 'G', .mult = 1 << 30 },
2002 { .tag = 0 },
2003 };
Jiri Olsabfacbe32017-01-09 10:52:00 +01002004 static struct parse_tag tags_time[] = {
2005 { .tag = 's', .mult = 1 },
2006 { .tag = 'm', .mult = 60 },
2007 { .tag = 'h', .mult = 60*60 },
2008 { .tag = 'd', .mult = 60*60*24 },
2009 { .tag = 0 },
2010 };
Jiri Olsadc0c6122017-01-09 10:51:58 +01002011 unsigned long val;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002012
2013 if (!s->set)
2014 return 0;
2015
2016 if (!strcmp(s->str, "signal")) {
2017 s->signal = true;
2018 pr_debug("switch-output with SIGUSR2 signal\n");
Jiri Olsadc0c6122017-01-09 10:51:58 +01002019 goto enabled;
2020 }
2021
2022 val = parse_tag_value(s->str, tags_size);
2023 if (val != (unsigned long) -1) {
2024 s->size = val;
2025 pr_debug("switch-output with %s size threshold\n", s->str);
2026 goto enabled;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002027 }
2028
Jiri Olsabfacbe32017-01-09 10:52:00 +01002029 val = parse_tag_value(s->str, tags_time);
2030 if (val != (unsigned long) -1) {
2031 s->time = val;
2032 pr_debug("switch-output with %s time threshold (%lu seconds)\n",
2033 s->str, s->time);
2034 goto enabled;
2035 }
2036
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002037 return -1;
Jiri Olsadc0c6122017-01-09 10:51:58 +01002038
2039enabled:
2040 rec->timestamp_filename = true;
2041 s->enabled = true;
Jiri Olsa0c582442017-01-09 10:51:59 +01002042
2043 if (s->size && !rec->opts.no_buffering)
2044 switch_output_size_warn(rec);
2045
Jiri Olsadc0c6122017-01-09 10:51:58 +01002046 return 0;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002047}
2048
/*
 * Usage strings for "perf record -h".  The non-static record_usage alias
 * exposes them outside this file.
 */
static const char * const __record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};
const char * const *record_usage = __record_usage;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002055
/*
 * XXX Ideally would be local to cmd_record() and passed to a record__new
 * because we need to have access to it in record__exit, that is called
 * after cmd_record() exits, but since record_options need to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't ouch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct record record = {
	.opts = {
		.sample_time	     = true,
		.mmap_pages	     = UINT_MAX,	/* UINT_MAX/ULLONG_MAX mean "not set by user" */
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,		/* default sampling frequency (Hz) */
		.target		     = {
			.uses_mmap   = true,
			.default_per_cpu = true,
		},
		.mmap_flush	     = MMAP_FLUSH_DEFAULT,
	},
	/* Event delivery callbacks used when processing the recorded data. */
	.tool = {
		.sample		= process_sample_event,
		.fork		= perf_event__process_fork,
		.exit		= perf_event__process_exit,
		.comm		= perf_event__process_comm,
		.namespaces	= perf_event__process_namespaces,
		.mmap		= perf_event__process_mmap,
		.mmap2		= perf_event__process_mmap2,
		.ordered_events	= true,
	},
};
Frederic Weisbecker7865e812010-04-14 19:42:07 +02002090
/* --call-graph help text; non-static so other builtins can reuse it. */
const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
	"\n\t\t\t\tDefault: fp";

/* Set by the --dry-run option — presumably parse-only, no recording; confirm in cmd_record (not visible here). */
static bool dry_run;
2095
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002096/*
2097 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
2098 * with it and switch to use the library functions in perf_evlist that came
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03002099 * from builtin-record.c, i.e. use record_opts,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002100 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
2101 * using pipes, etc.
2102 */
Jiri Olsaefd21302017-01-03 09:19:55 +01002103static struct option __record_options[] = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002104 OPT_CALLBACK('e', "event", &record.evlist, "event",
Thomas Gleixner86847b62009-06-06 12:24:17 +02002105 "event selector. use 'perf list' to list available events",
Jiri Olsaf120f9d2011-07-14 11:25:32 +02002106 parse_events_option),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002107 OPT_CALLBACK(0, "filter", &record.evlist, "filter",
Li Zefanc171b552009-10-15 11:22:07 +08002108 "event filter", parse_filter),
Wang Nan4ba1faa2015-07-10 07:36:10 +00002109 OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
2110 NULL, "don't record events from perf itself",
2111 exclude_perf),
Namhyung Kimbea03402012-04-26 14:15:15 +09002112 OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03002113 "record events on existing process id"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002114 OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03002115 "record events on existing thread id"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002116 OPT_INTEGER('r', "realtime", &record.realtime_prio,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002117 "collect data with this RT SCHED_FIFO priority"),
Arnaldo Carvalho de Melo509051e2014-01-14 17:52:14 -03002118 OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
Kirill Smelkovacac03f2011-01-12 17:59:36 +03002119 "collect data without buffering"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002120 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
Frederic Weisbeckerdaac07b2009-08-13 10:27:19 +02002121 "collect raw sample records from all opened counters"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002122 OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002123 "system-wide collection from all CPUs"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002124 OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
Stephane Eranianc45c6ea2010-05-28 12:00:01 +02002125 "list of cpus to monitor"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002126 OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
Jiri Olsa2d4f2792019-02-21 10:41:30 +01002127 OPT_STRING('o', "output", &record.data.path, "file",
Ingo Molnarabaff322009-06-02 22:59:57 +02002128 "output file name"),
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02002129 OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
2130 &record.opts.no_inherit_set,
2131 "child tasks do not inherit counters"),
Wang Nan4ea648a2016-07-14 08:34:47 +00002132 OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
2133 "synthesize non-sample events at the end of output"),
Wang Nan626a6b72016-07-14 08:34:45 +00002134 OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
Song Liu71184c62019-03-11 22:30:37 -07002135 OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "record bpf events"),
Arnaldo Carvalho de Melob09c2362018-03-01 14:52:50 -03002136 OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
2137 "Fail if the specified frequency can't be used"),
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03002138 OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
2139 "profile at this frequency",
2140 record__parse_freq),
Adrian Huntere9db1312015-04-09 18:53:46 +03002141 OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
2142 "number of mmap data pages and AUX area tracing mmap pages",
2143 record__parse_mmap_pages),
Alexey Budankov470530b2019-03-18 20:40:26 +03002144 OPT_CALLBACK(0, "mmap-flush", &record.opts, "number",
2145 "Minimal number of bytes that is extracted from mmap data pages (default: 1)",
2146 record__mmap_flush_parse),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002147 OPT_BOOLEAN(0, "group", &record.opts.group,
Lin Ming43bece72011-08-17 18:42:07 +08002148 "put the counters into a counter group"),
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03002149 OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002150 NULL, "enables call-graph recording" ,
2151 &record_callchain_opt),
2152 OPT_CALLBACK(0, "call-graph", &record.opts,
Namhyung Kim76a26542015-10-22 23:28:32 +09002153 "record_mode[,record_size]", record_callchain_help,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002154 &record_parse_callchain_opt),
Ian Munsiec0555642010-04-13 18:37:33 +10002155 OPT_INCR('v', "verbose", &verbose,
Ingo Molnar3da297a2009-06-07 17:39:02 +02002156 "be more verbose (show counter open errors, etc)"),
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02002157 OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002158 OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02002159 "per thread counts"),
Peter Zijlstra56100322015-06-10 16:48:50 +02002160 OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
Kan Liang3b0a5da2017-08-29 13:11:08 -04002161 OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
2162 "Record the sample physical addresses"),
Jiri Olsab6f35ed2016-08-01 20:02:35 +02002163 OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
Adrian Hunter3abebc52015-07-06 14:51:01 +03002164 OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
2165 &record.opts.sample_time_set,
2166 "Record the sample timestamps"),
Jiri Olsaf290aa12018-02-01 09:38:11 +01002167 OPT_BOOLEAN_SET('P', "period", &record.opts.period, &record.opts.period_set,
2168 "Record the sample period"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002169 OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02002170 "don't sample"),
Wang Nand2db9a92016-01-25 09:56:19 +00002171 OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
2172 &record.no_buildid_cache_set,
2173 "do not update the buildid cache"),
2174 OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
2175 &record.no_buildid_set,
2176 "do not collect buildids in perf.data"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002177 OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
Stephane Eranian023695d2011-02-14 11:20:01 +02002178 "monitor event in cgroup name only",
2179 parse_cgroups),
Arnaldo Carvalho de Meloa6205a32014-01-14 17:58:12 -03002180 OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
Andi Kleen6619a532014-01-11 13:38:27 -08002181 "ms to wait before starting measurement after program start"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002182 OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
2183 "user to profile"),
Stephane Eraniana5aabda2012-03-08 23:47:45 +01002184
2185 OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
2186 "branch any", "sample any taken branches",
2187 parse_branch_stack),
2188
2189 OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
2190 "branch filter mask", "branch stack filter modes",
Roberto Agostino Vitillobdfebd82012-02-09 23:21:02 +01002191 parse_branch_stack),
Andi Kleen05484292013-01-24 16:10:29 +01002192 OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
2193 "sample by weight (on special events only)"),
Andi Kleen475eeab2013-09-20 07:40:43 -07002194 OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
2195 "sample transaction flags (special events only)"),
Adrian Hunter3aa59392013-11-15 15:52:29 +02002196 OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
2197 "use per-thread mmaps"),
Stephane Eranianbcc84ec2015-08-31 18:41:12 +02002198 OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
2199 "sample selected machine registers on interrupt,"
Kan Liangaeea9062019-05-14 13:19:32 -07002200 " use '-I?' to list register names", parse_intr_regs),
Andi Kleen84c41742017-09-05 10:00:28 -07002201 OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
2202 "sample selected machine registers on interrupt,"
Kan Liangaeea9062019-05-14 13:19:32 -07002203 " use '--user-regs=?' to list register names", parse_user_regs),
Andi Kleen85c273d2015-02-24 15:13:40 -08002204 OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
2205 "Record running/enabled time of read (:S) events"),
Peter Zijlstra814c8c32015-03-31 00:19:31 +02002206 OPT_CALLBACK('k', "clockid", &record.opts,
2207 "clockid", "clockid to use for events, see clock_gettime()",
2208 parse_clockid),
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002209 OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
2210 "opts", "AUX area tracing Snapshot Mode", ""),
Mark Drayton3fcb10e2018-12-04 12:34:20 -08002211 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
Kan Liang9d9cad72015-06-17 09:51:11 -04002212 "per thread proc mmap processing timeout in ms"),
Hari Bathinif3b36142017-03-08 02:11:43 +05302213 OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
2214 "Record namespaces events"),
Adrian Hunterb757bb02015-07-21 12:44:04 +03002215 OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
2216 "Record context switch events"),
Jiri Olsa85723882016-02-15 09:34:31 +01002217 OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
2218 "Configure all used events to run in kernel space.",
2219 PARSE_OPT_EXCLUSIVE),
2220 OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
2221 "Configure all used events to run in user space.",
2222 PARSE_OPT_EXCLUSIVE),
yuzhoujian53651b22019-05-30 14:29:22 +01002223 OPT_BOOLEAN(0, "kernel-callchains", &record.opts.kernel_callchains,
2224 "collect kernel callchains"),
2225 OPT_BOOLEAN(0, "user-callchains", &record.opts.user_callchains,
2226 "collect user callchains"),
Wang Nan71dc23262015-10-14 12:41:19 +00002227 OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
2228 "clang binary to use for compiling BPF scriptlets"),
2229 OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
2230 "options passed to clang when compiling BPF scriptlets"),
He Kuang7efe0e02015-12-14 10:39:23 +00002231 OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
2232 "file", "vmlinux pathname"),
Namhyung Kim61566812016-01-11 22:37:09 +09002233 OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
2234 "Record build-id of all DSOs regardless of hits"),
Wang Nanecfd7a92016-04-13 08:21:07 +00002235 OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
2236 "append timestamp to output filename"),
Jin Yao68588ba2017-12-08 21:13:42 +08002237 OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
2238 "Record timestamp boundary (time of first/last samples)"),
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002239 OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
Andi Kleenc38dab72019-03-14 15:49:56 -07002240 &record.switch_output.set, "signal or size[BKMG] or time[smhd]",
2241 "Switch output when receiving SIGUSR2 (signal) or cross a size or time threshold",
Jiri Olsadc0c6122017-01-09 10:51:58 +01002242 "signal"),
Andi Kleen03724b22019-03-14 15:49:55 -07002243 OPT_INTEGER(0, "switch-max-files", &record.switch_output.num_files,
2244 "Limit number of switch output generated files"),
Wang Nan0aab2132016-06-16 08:02:41 +00002245 OPT_BOOLEAN(0, "dry-run", &dry_run,
2246 "Parse options then exit"),
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002247#ifdef HAVE_AIO_SUPPORT
Alexey Budankov93f20c02018-11-06 12:07:19 +03002248 OPT_CALLBACK_OPTARG(0, "aio", &record.opts,
2249 &nr_cblocks_default, "n", "Use <n> control blocks in asynchronous trace writing mode (default: 1, max: 4)",
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002250 record__aio_parse),
2251#endif
Alexey Budankovf4fe11b2019-01-22 20:52:03 +03002252 OPT_CALLBACK(0, "affinity", &record.opts, "node|cpu",
2253 "Set affinity mask of trace reading thread to NUMA node cpu mask or cpu of processed mmap buffer",
2254 record__parse_affinity),
Alexey Budankov504c1ad2019-03-18 20:44:42 +03002255#ifdef HAVE_ZSTD_SUPPORT
2256 OPT_CALLBACK_OPTARG('z', "compression-level", &record.opts, &comp_level_default,
2257 "n", "Compressed records using specified level (default: 1 - fastest compression, 22 - greatest compression)",
2258 record__parse_comp_level),
2259#endif
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002260 OPT_END()
2261};
2262
Namhyung Kime5b2c202014-10-23 00:15:46 +09002263struct option *record_options = __record_options;
2264
Arnaldo Carvalho de Melob0ad8ea2017-03-27 11:47:20 -03002265int cmd_record(int argc, const char **argv)
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002266{
Adrian Hunteref149c22015-04-09 18:53:45 +03002267 int err;
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03002268 struct record *rec = &record;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002269 char errbuf[BUFSIZ];
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002270
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03002271 setlocale(LC_ALL, "");
2272
Wang Nan48e1cab2015-12-14 10:39:22 +00002273#ifndef HAVE_LIBBPF_SUPPORT
2274# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
2275 set_nobuild('\0', "clang-path", true);
2276 set_nobuild('\0', "clang-opt", true);
2277# undef set_nobuild
2278#endif
2279
He Kuang7efe0e02015-12-14 10:39:23 +00002280#ifndef HAVE_BPF_PROLOGUE
2281# if !defined (HAVE_DWARF_SUPPORT)
2282# define REASON "NO_DWARF=1"
2283# elif !defined (HAVE_LIBBPF_SUPPORT)
2284# define REASON "NO_LIBBPF=1"
2285# else
2286# define REASON "this architecture doesn't support BPF prologue"
2287# endif
2288# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
2289 set_nobuild('\0', "vmlinux", true);
2290# undef set_nobuild
2291# undef REASON
2292#endif
2293
Alexey Budankov9d2ed642019-01-22 20:47:43 +03002294 CPU_ZERO(&rec->affinity_mask);
2295 rec->opts.affinity = PERF_AFFINITY_SYS;
2296
Jiri Olsa0f98b112019-07-21 13:23:55 +02002297 rec->evlist = evlist__new();
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03002298 if (rec->evlist == NULL)
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -02002299 return -ENOMEM;
2300
Arnaldo Carvalho de Meloecc4c562017-01-24 13:44:10 -03002301 err = perf_config(perf_record_config, rec);
2302 if (err)
2303 return err;
Jiri Olsaeb853e82014-02-03 12:44:42 +01002304
Tom Zanussibca647a2010-11-10 08:11:30 -06002305 argc = parse_options(argc, argv, record_options, record_usage,
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02002306 PARSE_OPT_STOP_AT_NON_OPTION);
Namhyung Kim68ba3232017-02-17 17:17:42 +09002307 if (quiet)
2308 perf_quiet_option();
Jiri Olsa483635a2017-02-17 18:00:18 +01002309
2310 /* Make system wide (-a) the default target. */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002311 if (!argc && target__none(&rec->opts.target))
Jiri Olsa483635a2017-02-17 18:00:18 +01002312 rec->opts.target.system_wide = true;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002313
Namhyung Kimbea03402012-04-26 14:15:15 +09002314 if (nr_cgroups && !rec->opts.target.system_wide) {
Namhyung Kimc7118362015-10-25 00:49:27 +09002315 usage_with_options_msg(record_usage, record_options,
2316 "cgroup monitoring only available in system-wide mode");
2317
Stephane Eranian023695d2011-02-14 11:20:01 +02002318 }
Alexey Budankov504c1ad2019-03-18 20:44:42 +03002319
2320 if (rec->opts.comp_level != 0) {
2321 pr_debug("Compression enabled, disabling build id collection at the end of the session.\n");
2322 rec->no_buildid = true;
2323 }
2324
Adrian Hunterb757bb02015-07-21 12:44:04 +03002325 if (rec->opts.record_switch_events &&
2326 !perf_can_record_switch_events()) {
Namhyung Kimc7118362015-10-25 00:49:27 +09002327 ui__error("kernel does not support recording context switch events\n");
2328 parse_options_usage(record_usage, record_options, "switch-events", 0);
2329 return -EINVAL;
Adrian Hunterb757bb02015-07-21 12:44:04 +03002330 }
Stephane Eranian023695d2011-02-14 11:20:01 +02002331
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002332 if (switch_output_setup(rec)) {
2333 parse_options_usage(record_usage, record_options, "switch-output", 0);
2334 return -EINVAL;
2335 }
2336
Jiri Olsabfacbe32017-01-09 10:52:00 +01002337 if (rec->switch_output.time) {
2338 signal(SIGALRM, alarm_sig_handler);
2339 alarm(rec->switch_output.time);
2340 }
2341
Andi Kleen03724b22019-03-14 15:49:55 -07002342 if (rec->switch_output.num_files) {
2343 rec->switch_output.filenames = calloc(sizeof(char *),
2344 rec->switch_output.num_files);
2345 if (!rec->switch_output.filenames)
2346 return -EINVAL;
2347 }
2348
Adrian Hunter1b36c032016-09-23 17:38:39 +03002349 /*
2350 * Allow aliases to facilitate the lookup of symbols for address
2351 * filters. Refer to auxtrace_parse_filters().
2352 */
2353 symbol_conf.allow_aliases = true;
2354
2355 symbol__init(NULL);
2356
Adrian Hunter4b5ea3b2018-03-06 11:13:12 +02002357 err = record__auxtrace_init(rec);
Adrian Hunter1b36c032016-09-23 17:38:39 +03002358 if (err)
2359 goto out;
2360
Wang Nan0aab2132016-06-16 08:02:41 +00002361 if (dry_run)
Adrian Hunter5c01ad602016-09-23 17:38:37 +03002362 goto out;
Wang Nan0aab2132016-06-16 08:02:41 +00002363
Wang Nand7888572016-04-08 15:07:24 +00002364 err = bpf__setup_stdout(rec->evlist);
2365 if (err) {
2366 bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
2367 pr_err("ERROR: Setup BPF stdout failed: %s\n",
2368 errbuf);
Adrian Hunter5c01ad602016-09-23 17:38:37 +03002369 goto out;
Wang Nand7888572016-04-08 15:07:24 +00002370 }
2371
Adrian Hunteref149c22015-04-09 18:53:45 +03002372 err = -ENOMEM;
2373
Arnaldo Carvalho de Melo6c443952017-11-14 11:03:19 -03002374 if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(rec->evlist))
Arnaldo Carvalho de Melo646aaea2011-05-27 11:00:41 -03002375 pr_warning(
2376"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
Igor Lubashevd06e5fa2019-08-26 21:39:16 -04002377"check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
Arnaldo Carvalho de Melo646aaea2011-05-27 11:00:41 -03002378"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
2379"file is not found in the buildid cache or in the vmlinux path.\n\n"
2380"Samples in kernel modules won't be resolved at all.\n\n"
2381"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
2382"even with a suitable vmlinux or kallsyms file.\n\n");
Arnaldo Carvalho de Meloec80fde2011-05-26 09:53:51 -03002383
Wang Nan0c1d46a2016-04-20 18:59:52 +00002384 if (rec->no_buildid_cache || rec->no_buildid) {
Stephane Eraniana1ac1d32010-06-17 11:39:01 +02002385 disable_buildid_cache();
Jiri Olsadc0c6122017-01-09 10:51:58 +01002386 } else if (rec->switch_output.enabled) {
Wang Nan0c1d46a2016-04-20 18:59:52 +00002387 /*
2388 * In 'perf record --switch-output', disable buildid
2389 * generation by default to reduce data file switching
2390 * overhead. Still generate buildid if they are required
2391 * explicitly using
2392 *
Jiri Olsa60437ac2017-01-03 09:19:56 +01002393 * perf record --switch-output --no-no-buildid \
Wang Nan0c1d46a2016-04-20 18:59:52 +00002394 * --no-no-buildid-cache
2395 *
2396 * Following code equals to:
2397 *
2398 * if ((rec->no_buildid || !rec->no_buildid_set) &&
2399 * (rec->no_buildid_cache || !rec->no_buildid_cache_set))
2400 * disable_buildid_cache();
2401 */
2402 bool disable = true;
2403
2404 if (rec->no_buildid_set && !rec->no_buildid)
2405 disable = false;
2406 if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
2407 disable = false;
2408 if (disable) {
2409 rec->no_buildid = true;
2410 rec->no_buildid_cache = true;
2411 disable_buildid_cache();
2412 }
2413 }
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02002414
Wang Nan4ea648a2016-07-14 08:34:47 +00002415 if (record.opts.overwrite)
2416 record.opts.tail_synthesize = true;
2417
Jiri Olsa6484d2f2019-07-21 13:24:28 +02002418 if (rec->evlist->core.nr_entries == 0 &&
Arnaldo Carvalho de Melo4b4cd502017-07-03 13:26:32 -03002419 __perf_evlist__add_default(rec->evlist, !record.opts.no_samples) < 0) {
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02002420 pr_err("Not enough memory for event selector list\n");
Adrian Hunter394c01e2016-09-23 17:38:36 +03002421 goto out;
Peter Zijlstrabbd36e52009-06-11 23:11:50 +02002422 }
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002423
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02002424 if (rec->opts.target.tid && !rec->opts.no_inherit_set)
2425 rec->opts.no_inherit = true;
2426
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002427 err = target__validate(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002428 if (err) {
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002429 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Jiri Olsac3dec272018-02-06 19:17:58 +01002430 ui__warning("%s\n", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002431 }
Namhyung Kim4bd0f2d2012-04-26 14:15:18 +09002432
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002433 err = target__parse_uid(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002434 if (err) {
2435 int saved_errno = errno;
2436
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002437 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Namhyung Kim3780f482012-05-29 13:22:57 +09002438 ui__error("%s", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002439
2440 err = -saved_errno;
Adrian Hunter394c01e2016-09-23 17:38:36 +03002441 goto out;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002442 }
Arnaldo Carvalho de Melo0d37aa32012-01-19 14:08:15 -02002443
Mengting Zhangca800062017-12-13 15:01:53 +08002444 /* Enable ignoring missing threads when -u/-p option is defined. */
2445 rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;
Jiri Olsa23dc4f12016-12-12 11:35:43 +01002446
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002447 err = -ENOMEM;
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03002448 if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
Arnaldo Carvalho de Melodd7927f2011-01-12 14:28:51 -02002449 usage_with_options(record_usage, record_options);
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02002450
Adrian Hunteref149c22015-04-09 18:53:45 +03002451 err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
2452 if (err)
Adrian Hunter394c01e2016-09-23 17:38:36 +03002453 goto out;
Adrian Hunteref149c22015-04-09 18:53:45 +03002454
Namhyung Kim61566812016-01-11 22:37:09 +09002455 /*
2456 * We take all buildids when the file contains
2457 * AUX area tracing data because we do not decode the
2458 * trace because it would take too long.
2459 */
2460 if (rec->opts.full_auxtrace)
2461 rec->buildid_all = true;
2462
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03002463 if (record_opts__config(&rec->opts)) {
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03002464 err = -EINVAL;
Adrian Hunter394c01e2016-09-23 17:38:36 +03002465 goto out;
Mike Galbraith7e4ff9e2009-10-12 07:56:03 +02002466 }
2467
Alexey Budankov93f20c02018-11-06 12:07:19 +03002468 if (rec->opts.nr_cblocks > nr_cblocks_max)
2469 rec->opts.nr_cblocks = nr_cblocks_max;
Alexey Budankov5d7f4112019-03-18 20:43:35 +03002470 pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks);
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002471
Alexey Budankov9d2ed642019-01-22 20:47:43 +03002472 pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
Alexey Budankov470530b2019-03-18 20:40:26 +03002473 pr_debug("mmap flush: %d\n", rec->opts.mmap_flush);
Alexey Budankov9d2ed642019-01-22 20:47:43 +03002474
Alexey Budankov51255a82019-03-18 20:42:19 +03002475 if (rec->opts.comp_level > comp_level_max)
2476 rec->opts.comp_level = comp_level_max;
2477 pr_debug("comp level: %d\n", rec->opts.comp_level);
2478
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002479 err = __cmd_record(&record, argc, argv);
Adrian Hunter394c01e2016-09-23 17:38:36 +03002480out:
Jiri Olsac12995a2019-07-21 13:23:56 +02002481 evlist__delete(rec->evlist);
Arnaldo Carvalho de Melod65a4582010-07-30 18:31:28 -03002482 symbol__exit();
Adrian Hunteref149c22015-04-09 18:53:45 +03002483 auxtrace_record__free(rec->itr);
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03002484 return err;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002485}
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002486
2487static void snapshot_sig_handler(int sig __maybe_unused)
2488{
Jiri Olsadc0c6122017-01-09 10:51:58 +01002489 struct record *rec = &record;
2490
Wang Nan5f9cf592016-04-20 18:59:49 +00002491 if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
2492 trigger_hit(&auxtrace_snapshot_trigger);
2493 auxtrace_record__snapshot_started = 1;
2494 if (auxtrace_record__snapshot_start(record.itr))
2495 trigger_error(&auxtrace_snapshot_trigger);
2496 }
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002497
Jiri Olsadc0c6122017-01-09 10:51:58 +01002498 if (switch_output_signal(rec))
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002499 trigger_hit(&switch_output_trigger);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002500}
Jiri Olsabfacbe32017-01-09 10:52:00 +01002501
2502static void alarm_sig_handler(int sig __maybe_unused)
2503{
2504 struct record *rec = &record;
2505
2506 if (switch_output_time(rec))
2507 trigger_hit(&switch_output_trigger);
2508}