// SPDX-License-Identifier: GPL-2.0
/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "util/build-id.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/config.h"

#include "util/callchain.h"
#include "util/cgroup.h"
#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/target.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/record.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"
#include "util/perf_regs.h"
#include "util/auxtrace.h"
#include "util/tsc.h"
#include "util/parse-branch-options.h"
#include "util/parse-regs-options.h"
#include "util/llvm-utils.h"
#include "util/bpf-loader.h"
#include "util/trigger.h"
#include "util/perf-hooks.h"
#include "util/cpu-set-sched.h"
#include "util/synthetic-events.h"
#include "util/time-utils.h"
#include "util/units.h"
#include "util/bpf-event.h"
#include "asm/bug.h"
#include "perf.h"

#include <errno.h>
#include <inttypes.h>
#include <locale.h>
#include <poll.h>
#include <unistd.h>
#include <sched.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>

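/*
 * State for --switch-output: rotation of the output file can be requested
 * by SIGUSR2 ("signal"), by accumulated output size ("size"), or
 * periodically ("time"). filenames/num_files/cur_file implement the ring
 * of kept files when a limit on generated files is requested
 * (--switch-output-max-files).
 */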
struct switch_output {
	bool		 enabled;
	bool		 signal;
	unsigned long	 size;
	unsigned long	 time;
	const char	*str;
	bool		 set;
	char		 **filenames;
	int		 num_files;
	int		 cur_file;
};

struct record {
	struct perf_tool	tool;
	struct record_opts	opts;
	u64			bytes_written;
	struct perf_data	data;
	struct auxtrace_record	*itr;
	struct evlist		*evlist;
	struct perf_session	*session;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_set;
	bool			no_buildid_cache;
	bool			no_buildid_cache_set;
	bool			buildid_all;
	bool			timestamp_filename;
	bool			timestamp_boundary;
	struct switch_output	switch_output;
	unsigned long long	samples;
	cpu_set_t		affinity_mask;
};

static volatile int auxtrace_record__snapshot_started;
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
static DEFINE_TRIGGER(switch_output_trigger);

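/* Human-readable names of the --affinity modes, indexed by the PERF_AFFINITY_* enum values. */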
static const char *affinity_tags[PERF_AFFINITY_MAX] = {
	"SYS", "NODE", "CPU"
};

static bool switch_output_signal(struct record *rec)
{
	return rec->switch_output.signal &&
	       trigger_is_ready(&switch_output_trigger);
}

static bool switch_output_size(struct record *rec)
{
	return rec->switch_output.size &&
	       trigger_is_ready(&switch_output_trigger) &&
	       (rec->bytes_written >= rec->switch_output.size);
}

static bool switch_output_time(struct record *rec)
{
	return rec->switch_output.time &&
	       trigger_is_ready(&switch_output_trigger);
}

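/*
 * Serial (non-AIO) write path: append a chunk to the perf.data file.
 * Updating bytes_written here is what makes a size-based --switch-output
 * trigger fire as a side effect of writing.
 */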
static int record__write(struct record *rec, struct perf_mmap *map __maybe_unused,
			 void *bf, size_t size)
{
	struct perf_data_file *file = &rec->session->data->file;

	if (perf_data_file__write(file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	rec->bytes_written += size;

	if (switch_output_size(rec))
		trigger_hit(&switch_output_trigger);

	return 0;
}

static int record__aio_enabled(struct record *rec);
static int record__comp_enabled(struct record *rec);
static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
			    void *src, size_t src_size);

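/*
 * Asynchronous trace writing (--aio): each mmap'ed kernel buffer gets
 * nr_cblocks user-space buffers (map->aio.data[]) with matching control
 * blocks (map->aio.cblocks[]). Data is copied (and optionally compressed)
 * out of the kernel buffer first and then queued to the output file with
 * aio_write(), so kernel buffer space is released as early as possible.
 */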
#ifdef HAVE_AIO_SUPPORT
static int record__aio_write(struct aiocb *cblock, int trace_fd,
			     void *buf, size_t size, off_t off)
{
	int rc;

	cblock->aio_fildes = trace_fd;
	cblock->aio_buf = buf;
	cblock->aio_nbytes = size;
	cblock->aio_offset = off;
	cblock->aio_sigevent.sigev_notify = SIGEV_NONE;

	do {
		rc = aio_write(cblock);
		if (rc == 0) {
			break;
		} else if (errno != EAGAIN) {
			cblock->aio_fildes = -1;
			pr_err("failed to queue perf data, error: %m\n");
			break;
		}
	} while (1);

	return rc;
}

static int record__aio_complete(struct perf_mmap *md, struct aiocb *cblock)
{
	void *rem_buf;
	off_t rem_off;
	size_t rem_size;
	int rc, aio_errno;
	ssize_t aio_ret, written;

	aio_errno = aio_error(cblock);
	if (aio_errno == EINPROGRESS)
		return 0;

	written = aio_ret = aio_return(cblock);
	if (aio_ret < 0) {
		if (aio_errno != EINTR)
			pr_err("failed to write perf data, error: %m\n");
		written = 0;
	}

	rem_size = cblock->aio_nbytes - written;

	if (rem_size == 0) {
		cblock->aio_fildes = -1;
		/*
		 * md->refcount is incremented in record__aio_pushfn() for
		 * every aio write request started in record__aio_push(), so
		 * decrement it because the request is now complete.
		 */
		perf_mmap__put(md);
		rc = 1;
	} else {
		/*
		 * The aio write request may require a restart with the
		 * remainder if the kernel didn't write the whole
		 * chunk at once.
		 */
		rem_off = cblock->aio_offset + written;
		rem_buf = (void *)(cblock->aio_buf + written);
		record__aio_write(cblock, cblock->aio_fildes,
				  rem_buf, rem_size, rem_off);
		rc = 0;
	}

	return rc;
}

static int record__aio_sync(struct perf_mmap *md, bool sync_all)
{
	struct aiocb **aiocb = md->aio.aiocb;
	struct aiocb *cblocks = md->aio.cblocks;
	struct timespec timeout = { 0, 1000 * 1000 * 1 }; /* 1ms */
	int i, do_suspend;

	do {
		do_suspend = 0;
		for (i = 0; i < md->aio.nr_cblocks; ++i) {
			if (cblocks[i].aio_fildes == -1 || record__aio_complete(md, &cblocks[i])) {
				if (sync_all)
					aiocb[i] = NULL;
				else
					return i;
			} else {
				/*
				 * The started aio write is not complete yet,
				 * so it has to be waited on before the
				 * next allocation.
				 */
				aiocb[i] = &cblocks[i];
				do_suspend = 1;
			}
		}
		if (!do_suspend)
			return -1;

		while (aio_suspend((const struct aiocb **)aiocb, md->aio.nr_cblocks, &timeout)) {
			if (!(errno == EAGAIN || errno == EINTR))
				pr_err("failed to sync perf data, error: %m\n");
		}
	} while (1);
}

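/*
 * Context passed through perf_mmap__push() to record__aio_pushfn():
 * accumulates the (possibly compressed) chunks of one buffer into a single
 * aio buffer so that record__aio_push() can queue them as one aio_write().
 */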
struct record_aio {
	struct record	*rec;
	void		*data;
	size_t		size;
};

static int record__aio_pushfn(struct perf_mmap *map, void *to, void *buf, size_t size)
{
	struct record_aio *aio = to;

	/*
	 * map->base data pointed to by buf is copied into a free map->aio.data[]
	 * buffer to release space in the kernel buffer as fast as possible,
	 * via the perf_mmap__consume() call made from perf_mmap__push().
	 *
	 * That lets the kernel proceed with storing more profiling data into
	 * the kernel buffer earlier than other per-cpu kernel buffers are handled.
	 *
	 * Copying can be done in two steps in case the chunk of profiling data
	 * crosses the upper bound of the kernel buffer. In this case we first move
	 * part of the data from map->start till the upper bound and then the
	 * remainder from the beginning of the kernel buffer till the end of
	 * the data chunk.
	 */

	if (record__comp_enabled(aio->rec)) {
		size = zstd_compress(aio->rec->session, aio->data + aio->size,
				     perf_mmap__mmap_len(map) - aio->size,
				     buf, size);
	} else {
		memcpy(aio->data + aio->size, buf, size);
	}

	if (!aio->size) {
		/*
		 * Increment map->refcount to guard the map->aio.data[] buffer
		 * from premature deallocation, because the map object can be
		 * released earlier than the aio write request started on
		 * map->aio.data[] is complete.
		 *
		 * perf_mmap__put() is done at record__aio_complete()
		 * after the started aio request completes, or at
		 * record__aio_push() if the request failed to start.
		 */
		perf_mmap__get(map);
	}

	aio->size += size;

	return size;
}

static int record__aio_push(struct record *rec, struct perf_mmap *map, off_t *off)
{
	int ret, idx;
	int trace_fd = rec->session->data->file.fd;
	struct record_aio aio = { .rec = rec, .size = 0 };

	/*
	 * Call record__aio_sync() to wait till a map->aio.data[] buffer
	 * becomes available after the previous aio write operation.
	 */

	idx = record__aio_sync(map, false);
	aio.data = map->aio.data[idx];
	ret = perf_mmap__push(map, &aio, record__aio_pushfn);
	if (ret != 0) /* ret > 0 - no data, ret < 0 - error */
		return ret;

	rec->samples++;
	ret = record__aio_write(&(map->aio.cblocks[idx]), trace_fd, aio.data, aio.size, *off);
	if (!ret) {
		*off += aio.size;
		rec->bytes_written += aio.size;
		if (switch_output_size(rec))
			trigger_hit(&switch_output_trigger);
	} else {
		/*
		 * Decrement map->refcount incremented in record__aio_pushfn()
		 * back if the record__aio_write() operation failed to start,
		 * otherwise map->refcount is decremented in record__aio_complete()
		 * after the aio write operation finishes successfully.
		 */
		perf_mmap__put(map);
	}

	return ret;
}

static off_t record__aio_get_pos(int trace_fd)
{
	return lseek(trace_fd, 0, SEEK_CUR);
}

static void record__aio_set_pos(int trace_fd, off_t pos)
{
	lseek(trace_fd, pos, SEEK_SET);
}

static void record__aio_mmap_read_sync(struct record *rec)
{
	int i;
	struct evlist *evlist = rec->evlist;
	struct perf_mmap *maps = evlist->mmap;

	if (!record__aio_enabled(rec))
		return;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		struct perf_mmap *map = &maps[i];

		if (map->base)
			record__aio_sync(map, true);
	}
}

static int nr_cblocks_default = 1;
static int nr_cblocks_max = 4;

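/*
 * Parse --aio[=n]: n is the number of control blocks (in-flight write
 * requests) per mmap'ed buffer; a bare --aio selects nr_cblocks_default.
 * The value is capped at nr_cblocks_max after option parsing.
 */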
static int record__aio_parse(const struct option *opt,
			     const char *str,
			     int unset)
{
	struct record_opts *opts = (struct record_opts *)opt->value;

	if (unset) {
		opts->nr_cblocks = 0;
	} else {
		if (str)
			opts->nr_cblocks = strtol(str, NULL, 0);
		if (!opts->nr_cblocks)
			opts->nr_cblocks = nr_cblocks_default;
	}

	return 0;
}
#else /* HAVE_AIO_SUPPORT */
static int nr_cblocks_max = 0;

static int record__aio_push(struct record *rec __maybe_unused, struct perf_mmap *map __maybe_unused,
			    off_t *off __maybe_unused)
{
	return -1;
}

static off_t record__aio_get_pos(int trace_fd __maybe_unused)
{
	return -1;
}

static void record__aio_set_pos(int trace_fd __maybe_unused, off_t pos __maybe_unused)
{
}

static void record__aio_mmap_read_sync(struct record *rec __maybe_unused)
{
}
#endif

static int record__aio_enabled(struct record *rec)
{
	return rec->opts.nr_cblocks > 0;
}

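/*
 * --mmap-flush parsing: the minimum number of bytes that must accumulate
 * in a kernel buffer before it is flushed, given as a plain number or with
 * a B/K/M/G suffix (e.g. --mmap-flush=512K), and capped at a quarter of
 * the mmap buffer size.
 */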
#define MMAP_FLUSH_DEFAULT 1
static int record__mmap_flush_parse(const struct option *opt,
				    const char *str,
				    int unset)
{
	int flush_max;
	struct record_opts *opts = (struct record_opts *)opt->value;
	static struct parse_tag tags[] = {
			{ .tag  = 'B', .mult = 1       },
			{ .tag  = 'K', .mult = 1 << 10 },
			{ .tag  = 'M', .mult = 1 << 20 },
			{ .tag  = 'G', .mult = 1 << 30 },
			{ .tag  = 0 },
	};

	if (unset)
		return 0;

	if (str) {
		opts->mmap_flush = parse_tag_value(str, tags);
		if (opts->mmap_flush == (int)-1)
			opts->mmap_flush = strtol(str, NULL, 0);
	}

	if (!opts->mmap_flush)
		opts->mmap_flush = MMAP_FLUSH_DEFAULT;

	flush_max = perf_evlist__mmap_size(opts->mmap_pages);
	flush_max /= 4;
	if (opts->mmap_flush > flush_max)
		opts->mmap_flush = flush_max;

	return 0;
}

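/*
 * -z/--compression-level parsing (Zstandard): level 0 disables compression
 * (see record__comp_enabled()), a bare -z selects comp_level_default, and
 * comp_level_max matches the highest level zstd accepts.
 */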
#ifdef HAVE_ZSTD_SUPPORT
static unsigned int comp_level_default = 1;

static int record__parse_comp_level(const struct option *opt, const char *str, int unset)
{
	struct record_opts *opts = opt->value;

	if (unset) {
		opts->comp_level = 0;
	} else {
		if (str)
			opts->comp_level = strtol(str, NULL, 0);
		if (!opts->comp_level)
			opts->comp_level = comp_level_default;
	}

	return 0;
}
#endif
static unsigned int comp_level_max = 22;

static int record__comp_enabled(struct record *rec)
{
	return rec->opts.comp_level > 0;
}

static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, NULL, event, event->header.size);
}

static int record__pushfn(struct perf_mmap *map, void *to, void *bf, size_t size)
{
	struct record *rec = to;

	if (record__comp_enabled(rec)) {
		size = zstd_compress(rec->session, map->data, perf_mmap__mmap_len(map), bf, size);
		bf   = map->data;
	}

	rec->samples++;
	return record__write(rec, map, bf, size);
}

static volatile int done;
static volatile int signr = -1;
static volatile int child_finished;

static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
}

static void sigsegv_handler(int sig)
{
	perf_hooks__recover();
	sighandler_dump_stack(sig);
}

static void record__sig_exit(void)
{
	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	raise(signr);
}

#ifdef HAVE_AUXTRACE_SUPPORT

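/*
 * Write one AUX area trace event: the synthesized event header, then up to
 * two data fragments (two when the chunk wraps around the end of the ring
 * buffer), then padding to the 8-byte alignment already accounted for in
 * event.auxtrace.size.
 */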
static int record__process_auxtrace(struct perf_tool *tool,
				    struct perf_mmap *map,
				    union perf_event *event, void *data1,
				    size_t len1, void *data2, size_t len2)
{
	struct record *rec = container_of(tool, struct record, tool);
	struct perf_data *data = &rec->data;
	size_t padding;
	u8 pad[8] = {0};

	if (!perf_data__is_pipe(data) && !perf_data__is_dir(data)) {
		off_t file_offset;
		int fd = perf_data__fd(data);
		int err;

		file_offset = lseek(fd, 0, SEEK_CUR);
		if (file_offset == -1)
			return -1;
		err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
						     event, file_offset);
		if (err)
			return err;
	}

	/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
	padding = (len1 + len2) & 7;
	if (padding)
		padding = 8 - padding;

	record__write(rec, map, event, event->header.size);
	record__write(rec, map, data1, len1);
	if (len2)
		record__write(rec, map, data2, len2);
	record__write(rec, map, &pad, padding);

	return 0;
}

static int record__auxtrace_mmap_read(struct record *rec,
				      struct perf_mmap *map)
{
	int ret;

	ret = auxtrace_mmap__read(map, rec->itr, &rec->tool,
				  record__process_auxtrace);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_mmap_read_snapshot(struct record *rec,
					       struct perf_mmap *map)
{
	int ret;

	ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool,
					   record__process_auxtrace,
					   rec->opts.auxtrace_snapshot_size);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_read_snapshot_all(struct record *rec)
{
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		struct perf_mmap *map = &rec->evlist->mmap[i];

		if (!map->auxtrace_mmap.base)
			continue;

		if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) {
			rc = -1;
			goto out;
		}
	}
out:
	return rc;
}

static void record__read_auxtrace_snapshot(struct record *rec, bool on_exit)
{
	pr_debug("Recording AUX area tracing snapshot\n");
	if (record__auxtrace_read_snapshot_all(rec) < 0) {
		trigger_error(&auxtrace_snapshot_trigger);
	} else {
		if (auxtrace_record__snapshot_finish(rec->itr, on_exit))
			trigger_error(&auxtrace_snapshot_trigger);
		else
			trigger_ready(&auxtrace_snapshot_trigger);
	}
}

static int record__auxtrace_snapshot_exit(struct record *rec)
{
	if (trigger_is_error(&auxtrace_snapshot_trigger))
		return 0;

	if (!auxtrace_record__snapshot_started &&
	    auxtrace_record__snapshot_start(rec->itr))
		return -1;

	record__read_auxtrace_snapshot(rec, true);
	if (trigger_is_error(&auxtrace_snapshot_trigger))
		return -1;

	return 0;
}

static int record__auxtrace_init(struct record *rec)
{
	int err;

	if (!rec->itr) {
		rec->itr = auxtrace_record__init(rec->evlist, &err);
		if (err)
			return err;
	}

	err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
					      rec->opts.auxtrace_snapshot_opts);
	if (err)
		return err;

	return auxtrace_parse_filters(rec->evlist);
}

#else

static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
			       struct perf_mmap *map __maybe_unused)
{
	return 0;
}

static inline
void record__read_auxtrace_snapshot(struct record *rec __maybe_unused,
				    bool on_exit __maybe_unused)
{
}

static inline
int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
{
	return 0;
}

static inline
int record__auxtrace_snapshot_exit(struct record *rec __maybe_unused)
{
	return 0;
}

static int record__auxtrace_init(struct record *rec __maybe_unused)
{
	return 0;
}

#endif

static int record__mmap_evlist(struct record *rec,
			       struct evlist *evlist)
{
	struct record_opts *opts = &rec->opts;
	char msg[512];

	if (opts->affinity != PERF_AFFINITY_SYS)
		cpu__setup_cpunode_map();

	if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
				 opts->auxtrace_mmap_pages,
				 opts->auxtrace_snapshot_mode,
				 opts->nr_cblocks, opts->affinity,
				 opts->mmap_flush, opts->comp_level) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u,%u)\n",
			       opts->mmap_pages, opts->auxtrace_mmap_pages);
			return -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno,
				str_error_r(errno, msg, sizeof(msg)));
			if (errno)
				return -errno;
			else
				return -EINVAL;
		}
	}
	return 0;
}

static int record__mmap(struct record *rec)
{
	return record__mmap_evlist(rec, rec->evlist);
}

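/*
 * Open all events in the evlist, retrying with more conservative settings
 * (perf_evsel__fallback()) or with weak groups broken up when the requested
 * configuration is not supported, then apply filters and mmap the buffers.
 */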
static int record__open(struct record *rec)
{
	char msg[BUFSIZ];
	struct evsel *pos;
	struct evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	int rc = 0;

	/*
	 * For initial_delay we need to add a dummy event so that we can track
	 * PERF_RECORD_MMAP while we wait for the initial delay to enable the
	 * real events, the ones asked by the user.
	 */
	if (opts->initial_delay) {
		if (perf_evlist__add_dummy(evlist))
			return -ENOMEM;

		pos = perf_evlist__first(evlist);
		pos->tracking = 0;
		pos = perf_evlist__last(evlist);
		pos->tracking = 1;
		pos->core.attr.enable_on_exec = 1;
	}

	perf_evlist__config(evlist, opts, &callchain_param);

	evlist__for_each_entry(evlist, pos) {
try_again:
		if (evsel__open(pos, pos->core.cpus, pos->core.threads) < 0) {
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose > 0)
					ui__warning("%s\n", msg);
				goto try_again;
			}
			if ((errno == EINVAL || errno == EBADF) &&
			    pos->leader != pos &&
			    pos->weak_group) {
				pos = perf_evlist__reset_weak_group(evlist, pos);
				goto try_again;
			}
			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}

		pos->supported = true;
	}

	if (perf_evlist__apply_filters(evlist, &pos)) {
		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
			pos->filter, perf_evsel__name(pos), errno,
			str_error_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	rc = record__mmap(rec);
	if (rc)
		goto out;

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct evsel *evsel,
				struct machine *machine)
{
	struct record *rec = container_of(tool, struct record, tool);

	if (rec->evlist->first_sample_time == 0)
		rec->evlist->first_sample_time = sample->time;

	rec->evlist->last_sample_time = sample->time;

	if (rec->buildid_all)
		return 0;

	rec->samples++;
	return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
}

static int process_buildids(struct record *rec)
{
	struct perf_session *session = rec->session;

	if (perf_data__size(&rec->data) == 0)
		return 0;

	/*
	 * During this process, it'll load the kernel map and replace the
	 * dso->long_name with a real pathname it found. In this case
	 * we prefer the vmlinux path like
	 *   /lib/modules/3.16.4/build/vmlinux
	 *
	 * rather than the build-id path (in the debug directory):
	 *   $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
	 */
	symbol_conf.ignore_vmlinux_buildid = true;

	/*
	 * If --buildid-all is given, it marks all DSOs regardless of hits,
	 * so no need to process samples. But if timestamp_boundary is enabled,
	 * it still needs to walk all samples to get the timestamps of the
	 * first/last samples.
	 */
	if (rec->buildid_all && !rec->timestamp_boundary)
		rec->tool.sample = NULL;

	return perf_session__process_events(session);
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * As for the guest kernel, when processing the record and report
	 * subcommands we arrange the module mmaps prior to the guest kernel
	 * mmap and trigger a dso preload, because default guest module
	 * symbols are loaded from guest kallsyms instead of
	 * /lib/modules/XXX/XXX. This method is used to avoid missing
	 * symbols when the first addr is in a module instead of in the
	 * guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for the guest kernel because the guest kernel's
	 * /proc/kallsyms sometimes has no _text.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

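/*
 * PERF_RECORD_FINISHED_ROUND is a hint for the report side's event
 * reordering: everything written before this point can be sorted and
 * flushed safely.
 */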
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

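/*
 * With --affinity=node|cpu, migrate the recording thread to the CPU set
 * associated with the map that is about to be flushed.
 */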
static void record__adjust_affinity(struct record *rec, struct perf_mmap *map)
{
	if (rec->opts.affinity != PERF_AFFINITY_SYS &&
	    !CPU_EQUAL(&rec->affinity_mask, &map->affinity_mask)) {
		CPU_ZERO(&rec->affinity_mask);
		CPU_OR(&rec->affinity_mask, &rec->affinity_mask, &map->affinity_mask);
		sched_setaffinity(0, sizeof(rec->affinity_mask), &rec->affinity_mask);
	}
}

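/*
 * Callback for zstd_compress_stream_to_records(): with increment == 0 it
 * starts a new PERF_RECORD_COMPRESSED header, otherwise it grows the
 * current record by the number of compressed bytes just produced.
 */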
static size_t process_comp_header(void *record, size_t increment)
{
	struct perf_record_compressed *event = record;
	size_t size = sizeof(*event);

	if (increment) {
		event->header.size += increment;
		return increment;
	}

	event->header.type = PERF_RECORD_COMPRESSED;
	event->header.size = size;

	return size;
}

static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
			    void *src, size_t src_size)
{
	size_t compressed;
	size_t max_record_size = PERF_SAMPLE_MAX_SIZE - sizeof(struct perf_record_compressed) - 1;

	compressed = zstd_compress_stream_to_records(&session->zstd_data, dst, dst_size, src, src_size,
						     max_record_size, process_comp_header);

	session->bytes_transferred += src_size;
	session->bytes_compressed  += compressed;

	return compressed;
}

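/*
 * Drain every mmap'ed buffer of the evlist through the serial or AIO write
 * path. With synch, map->flush is temporarily forced to 1 so that even
 * partially filled buffers are pushed out. A FINISHED_ROUND event is
 * emitted if at least one event was written.
 */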
static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist,
				    bool overwrite, bool synch)
{
	u64 bytes_written = rec->bytes_written;
	int i;
	int rc = 0;
	struct perf_mmap *maps;
	int trace_fd = rec->data.file.fd;
	off_t off = 0;

	if (!evlist)
		return 0;

	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
	if (!maps)
		return 0;

	if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
		return 0;

	if (record__aio_enabled(rec))
		off = record__aio_get_pos(trace_fd);

	for (i = 0; i < evlist->nr_mmaps; i++) {
		u64 flush = 0;
		struct perf_mmap *map = &maps[i];

		if (map->base) {
			record__adjust_affinity(rec, map);
			if (synch) {
				flush = map->flush;
				map->flush = 1;
			}
			if (!record__aio_enabled(rec)) {
				if (perf_mmap__push(map, rec, record__pushfn) < 0) {
					if (synch)
						map->flush = flush;
					rc = -1;
					goto out;
				}
			} else {
				if (record__aio_push(rec, map, &off) < 0) {
					record__aio_set_pos(trace_fd, off);
					if (synch)
						map->flush = flush;
					rc = -1;
					goto out;
				}
			}
			if (synch)
				map->flush = flush;
		}

		if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
		    record__auxtrace_mmap_read(rec, map) != 0) {
			rc = -1;
			goto out;
		}
	}

	if (record__aio_enabled(rec))
		record__aio_set_pos(trace_fd, off);

	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, NULL, &finished_round_event, sizeof(finished_round_event));

	if (overwrite)
		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
out:
	return rc;
}

static int record__mmap_read_all(struct record *rec, bool synch)
{
	int err;

	err = record__mmap_read_evlist(rec, rec->evlist, false, synch);
	if (err)
		return err;

	return record__mmap_read_evlist(rec, rec->evlist, true, synch);
}

static void record__init_features(struct record *rec)
{
	struct perf_session *session = rec->session;
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&rec->evlist->core.entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);

	if (!rec->opts.full_auxtrace)
		perf_header__clear_feat(&session->header, HEADER_AUXTRACE);

	if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
		perf_header__clear_feat(&session->header, HEADER_CLOCKID);

	perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
	if (!record__comp_enabled(rec))
		perf_header__clear_feat(&session->header, HEADER_COMPRESSED);

	perf_header__clear_feat(&session->header, HEADER_STAT);
}

static void
record__finish_output(struct record *rec)
{
	struct perf_data *data = &rec->data;
	int fd = perf_data__fd(data);

	if (data->is_pipe)
		return;

	rec->session->header.data_size += rec->bytes_written;
	data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR);

	if (!rec->no_buildid) {
		process_buildids(rec);

		if (rec->buildid_all)
			dsos__hit_all(rec->session);
	}
	perf_session__write_header(rec->session, rec->evlist, fd, true);

	return;
}

static int record__synthesize_workload(struct record *rec, bool tail)
{
	int err;
	struct perf_thread_map *thread_map;

	if (rec->opts.tail_synthesize != tail)
		return 0;

	thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
	if (thread_map == NULL)
		return -1;

	err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
						 process_synthesized_event,
						 &rec->session->machines.host,
						 rec->opts.sample_address);
	perf_thread_map__put(thread_map);
	return err;
}

static int record__synthesize(struct record *rec, bool tail);

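/*
 * Rotate the output: flush in-flight AIO writes, synthesize "tail" events,
 * finalize the current file and switch to a new timestamped perf.data;
 * when a file limit is set, recycle slots in the switch_output.filenames
 * ring.
 */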
static int
record__switch_output(struct record *rec, bool at_exit)
{
	struct perf_data *data = &rec->data;
	int fd, err;
	char *new_filename;

	/* Same size: "2015122520103046" */
	char timestamp[] = "InvalidTimestamp";

	record__aio_mmap_read_sync(rec);

	record__synthesize(rec, true);
	if (target__none(&rec->opts.target))
		record__synthesize_workload(rec, true);

	rec->samples = 0;
	record__finish_output(rec);
	err = fetch_current_timestamp(timestamp, sizeof(timestamp));
	if (err) {
		pr_err("Failed to get current timestamp\n");
		return -EINVAL;
	}

	fd = perf_data__switch(data, timestamp,
			       rec->session->header.data_offset,
			       at_exit, &new_filename);
	if (fd >= 0 && !at_exit) {
		rec->bytes_written = 0;
		rec->session->header.data_size = 0;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
			data->path, timestamp);

	if (rec->switch_output.num_files) {
		int n = rec->switch_output.cur_file + 1;

		if (n >= rec->switch_output.num_files)
			n = 0;
		rec->switch_output.cur_file = n;
		if (rec->switch_output.filenames[n]) {
			remove(rec->switch_output.filenames[n]);
			zfree(&rec->switch_output.filenames[n]);
		}
		rec->switch_output.filenames[n] = new_filename;
	} else {
		free(new_filename);
	}

	/* Output tracking events */
	if (!at_exit) {
		record__synthesize(rec, false);

		/*
		 * In 'perf record --switch-output' without -a,
		 * record__synthesize() in record__switch_output() won't
		 * generate tracking events because there's no thread_map
		 * in evlist. That causes the newly created perf.data to
		 * contain no map and comm information.
		 * Create a fake thread_map and directly call
		 * perf_event__synthesize_thread_map() for those events.
		 */
		if (target__none(&rec->opts.target))
			record__synthesize_workload(rec, false);
	}
	return fd;
}

static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}

static void snapshot_sig_handler(int sig);
static void alarm_sig_handler(int sig);

static const struct perf_event_mmap_page *
perf_evlist__pick_pc(struct evlist *evlist)
{
	if (evlist) {
		if (evlist->mmap && evlist->mmap[0].base)
			return evlist->mmap[0].base;
		if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].base)
			return evlist->overwrite_mmap[0].base;
	}
	return NULL;
}

static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
{
	const struct perf_event_mmap_page *pc;

	pc = perf_evlist__pick_pc(rec->evlist);
	if (pc)
		return pc;
	return NULL;
}

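/*
 * Emit the synthetic (non-sample) events that describe the session: event
 * attributes, features and tracing data for pipe output, time conversion
 * and auxtrace info, kernel and module maps, thread and CPU maps, BPF
 * images, and the already-running threads of the target.
 */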
Wang Nan4ea648a2016-07-14 08:34:47 +00001206static int record__synthesize(struct record *rec, bool tail)
Wang Nanc45c86e2016-02-26 09:32:07 +00001207{
1208 struct perf_session *session = rec->session;
1209 struct machine *machine = &session->machines.host;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001210 struct perf_data *data = &rec->data;
Wang Nanc45c86e2016-02-26 09:32:07 +00001211 struct record_opts *opts = &rec->opts;
1212 struct perf_tool *tool = &rec->tool;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001213 int fd = perf_data__fd(data);
Wang Nanc45c86e2016-02-26 09:32:07 +00001214 int err = 0;
1215
Wang Nan4ea648a2016-07-14 08:34:47 +00001216 if (rec->opts.tail_synthesize != tail)
1217 return 0;
1218
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001219 if (data->is_pipe) {
Jiri Olsaa2015512018-03-14 10:22:04 +01001220 /*
1221 * We need to synthesize events first, because some
1222 * features works on top of them (on report side).
1223 */
Jiri Olsa318ec182018-08-30 08:32:15 +02001224 err = perf_event__synthesize_attrs(tool, rec->evlist,
Wang Nanc45c86e2016-02-26 09:32:07 +00001225 process_synthesized_event);
1226 if (err < 0) {
1227 pr_err("Couldn't synthesize attrs.\n");
1228 goto out;
1229 }
1230
		err = perf_event__synthesize_features(tool, session, rec->evlist,
						      process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize features.\n");
			return err;
		}

		if (have_tracepoints(&rec->evlist->core.entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so it's not really
			 * an error, just that we don't need to
			 * synthesize anything. We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out;
			}
			rec->bytes_written += err;
		}
	}

	err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
					  process_synthesized_event, machine);
	if (err)
		goto out;

	if (rec->opts.full_auxtrace) {
		err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
					session, process_synthesized_event);
		if (err)
			goto out;
	}

	if (!perf_evlist__exclude_kernel(rec->evlist)) {
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine);
		WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
				   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
				   "Check /proc/kallsyms permission or run as root.\n");

		err = perf_event__synthesize_modules(tool, process_synthesized_event,
						     machine);
		WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
				   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
				   "Check /proc/modules permission or run as root.\n");
	}

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	err = perf_event__synthesize_extra_attr(&rec->tool,
						rec->evlist,
						process_synthesized_event,
						data->is_pipe);
	if (err)
		goto out;

	err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->core.threads,
						 process_synthesized_event,
						 NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize thread map.\n");
		return err;
	}

	err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.cpus,
					     process_synthesized_event, NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize cpu map.\n");
		return err;
	}

	err = perf_event__synthesize_bpf_events(session, process_synthesized_event,
						machine, opts);
	if (err < 0)
		pr_warning("Couldn't synthesize bpf events.\n");

	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->core.threads,
					    process_synthesized_event, opts->sample_address,
					    1);
out:
	return err;
}
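
/*
 * Note on ordering (a summary of the steps above, not normative): the
 * metadata events - features, tracing data, time conversion, auxtrace
 * info, kernel/module mmaps, thread and cpu maps, BPF events - are
 * synthesized before the per-thread state, so that consumers of the
 * stream can resolve later samples against maps and attributes they
 * have already seen.
 */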

static int __cmd_record(struct record *rec, int argc, const char **argv)
{
	int err;
	int status = 0;
	unsigned long waking = 0;
	const bool forks = argc > 0;
	struct perf_tool *tool = &rec->tool;
	struct record_opts *opts = &rec->opts;
	struct perf_data *data = &rec->data;
	struct perf_session *session;
	bool disabled = false, draining = false;
	struct evlist *sb_evlist = NULL;
	int fd;
	float ratio = 0;

	atexit(record__sig_exit);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGTERM, sig_handler);
	signal(SIGSEGV, sigsegv_handler);

	if (rec->opts.record_namespaces)
		tool->namespace_events = true;

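	/*
	 * SIGUSR2 does double duty below: depending on the enabled
	 * features it triggers an AUX area snapshot, an output file
	 * switch, or both. When neither feature is active it is ignored,
	 * so a stray signal cannot disturb the session.
	 */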
	if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
		signal(SIGUSR2, snapshot_sig_handler);
		if (rec->opts.auxtrace_snapshot_mode)
			trigger_on(&auxtrace_snapshot_trigger);
		if (rec->switch_output.enabled)
			trigger_on(&switch_output_trigger);
	} else {
		signal(SIGUSR2, SIG_IGN);
	}

	session = perf_session__new(data, false, tool);
	if (session == NULL) {
		pr_err("Perf session creation failed.\n");
		return -1;
	}

	fd = perf_data__fd(data);
	rec->session = session;

	if (zstd_init(&session->zstd_data, rec->opts.comp_level) < 0) {
		pr_err("Compression initialization failed.\n");
		return -1;
	}

	session->header.env.comp_type = PERF_COMP_ZSTD;
	session->header.env.comp_level = rec->opts.comp_level;

	record__init_features(rec);

	if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
		session->header.env.clockid_res_ns = rec->opts.clockid_res_ns;

	if (forks) {
		err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
						    argv, data->is_pipe,
						    workload_exec_failed_signal);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			status = err;
			goto out_delete_session;
		}
	}

	/*
	 * If we have just one event and are sending data through a pipe,
	 * we need to force ID allocation, because we synthesize the
	 * event name through the pipe and need the ID for that.
	 */
	if (data->is_pipe && rec->evlist->core.nr_entries == 1)
		rec->opts.sample_id = true;

	if (record__open(rec) != 0) {
		err = -1;
		goto out_child;
	}
	session->header.env.comp_mmap_len = session->evlist->mmap_len;

	err = bpf__apply_obj_config();
	if (err) {
		char errbuf[BUFSIZ];

		bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
		pr_err("ERROR: Apply config to BPF failed: %s\n",
			 errbuf);
		goto out_child;
	}

	/*
	 * Normally perf_session__new would do this, but it doesn't have the
	 * evlist.
	 */
	if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
		pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
		rec->tool.ordered_events = false;
	}

	if (!rec->evlist->nr_groups)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

	if (data->is_pipe) {
		err = perf_header__write_pipe(fd);
		if (err < 0)
			goto out_child;
	} else {
		err = perf_session__write_header(session, rec->evlist, fd, false);
		if (err < 0)
			goto out_child;
	}

	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		err = -1;
		goto out_child;
	}

	if (!opts->no_bpf_event)
		bpf_event__add_sb_event(&sb_evlist, &session->header.env);

	if (perf_evlist__start_sb_thread(sb_evlist, &rec->opts.target)) {
		pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
		opts->no_bpf_event = true;
	}
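
	/*
	 * Per the message above: the side-band thread exists so that BPF
	 * programs loaded while we record remain annotatable; failing to
	 * start it only disables BPF events instead of aborting the
	 * whole session.
	 */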

	err = record__synthesize(rec, false);
	if (err < 0)
		goto out_child;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_child;
		}
	}

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!target__none(&opts->target) && !opts->initial_delay)
		evlist__enable(rec->evlist);

	/*
	 * Let the child rip
	 */
	if (forks) {
		struct machine *machine = &session->machines.host;
		union perf_event *event;
		pid_t tgid;

		event = malloc(sizeof(event->comm) + machine->id_hdr_size);
		if (event == NULL) {
			err = -ENOMEM;
			goto out_child;
		}

		/*
		 * Some H/W events are generated before the COMM event,
		 * which is emitted during exec(), so perf script
		 * cannot see a correct process name for those events.
		 * Synthesize a COMM event to avoid this.
		 */
		tgid = perf_event__synthesize_comm(tool, event,
						   rec->evlist->workload.pid,
						   process_synthesized_event,
						   machine);
		free(event);

		if (tgid == -1)
			goto out_child;

		event = malloc(sizeof(event->namespaces) +
			       (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
			       machine->id_hdr_size);
		if (event == NULL) {
			err = -ENOMEM;
			goto out_child;
		}

		/*
		 * Synthesize NAMESPACES event for the command specified.
		 */
		perf_event__synthesize_namespaces(tool, event,
						  rec->evlist->workload.pid,
						  tgid, process_synthesized_event,
						  machine);
		free(event);

		perf_evlist__start_workload(rec->evlist);
	}

	if (opts->initial_delay) {
		usleep(opts->initial_delay * USEC_PER_MSEC);
		evlist__enable(rec->evlist);
	}
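
	/*
	 * Illustrative usage (hypothetical workload name): with
	 * "perf record --delay 500 -- ./myprog", events are only enabled
	 * here, 500ms after the workload started, so early startup
	 * activity is deliberately left out of the profile.
	 */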

	trigger_ready(&auxtrace_snapshot_trigger);
	trigger_ready(&switch_output_trigger);
	perf_hooks__invoke_record_start();
	for (;;) {
		unsigned long long hits = rec->samples;

		/*
		 * rec->evlist->bkw_mmap_state may be BKW_MMAP_EMPTY
		 * here: when done == true and hits != rec->samples in
		 * the previous round.
		 *
		 * perf_evlist__toggle_bkw_mmap ensures we never
		 * convert BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
		 */
		if (trigger_is_hit(&switch_output_trigger) || done || draining)
			perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);

		if (record__mmap_read_all(rec, false) < 0) {
			trigger_error(&auxtrace_snapshot_trigger);
			trigger_error(&switch_output_trigger);
			err = -1;
			goto out_child;
		}

		if (auxtrace_record__snapshot_started) {
			auxtrace_record__snapshot_started = 0;
			if (!trigger_is_error(&auxtrace_snapshot_trigger))
				record__read_auxtrace_snapshot(rec, false);
			if (trigger_is_error(&auxtrace_snapshot_trigger)) {
				pr_err("AUX area tracing snapshot failed\n");
				err = -1;
				goto out_child;
			}
		}

		if (trigger_is_hit(&switch_output_trigger)) {
			/*
			 * If switch_output_trigger is hit, the data in
			 * the overwritable ring buffer should have been
			 * collected, so bkw_mmap_state should be set to
			 * BKW_MMAP_EMPTY.
			 *
			 * If SIGUSR2 is raised after or during
			 * record__mmap_read_all(), it didn't collect
			 * data from the overwritable ring buffer.
			 * Read again.
			 */
			if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
				continue;
			trigger_ready(&switch_output_trigger);

			/*
			 * Reenable events in the overwrite ring buffer after
			 * record__mmap_read_all(): we should have collected
			 * data from it.
			 */
			perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);

			if (!quiet)
				fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
					waking);
			waking = 0;
			fd = record__switch_output(rec, false);
			if (fd < 0) {
				pr_err("Failed to switch to new file\n");
				trigger_error(&switch_output_trigger);
				err = fd;
				goto out_child;
			}

			/* re-arm the alarm */
			if (rec->switch_output.time)
				alarm(rec->switch_output.time);
		}

		if (hits == rec->samples) {
			if (done || draining)
				break;
			err = perf_evlist__poll(rec->evlist, -1);
			/*
			 * Propagate error, only if there's any. Ignore positive
			 * number of returned events and interrupt error.
			 */
			if (err > 0 || (err < 0 && errno == EINTR))
				err = 0;
			waking++;

			if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
				draining = true;
		}

		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
		if (done && !disabled && !target__none(&opts->target)) {
			trigger_off(&auxtrace_snapshot_trigger);
			evlist__disable(rec->evlist);
			disabled = true;
		}
	}
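
	/*
	 * Loop exit summary (restating the conditions above): we leave the
	 * loop only when no new samples arrived in a round (hits ==
	 * rec->samples) and either a signal set 'done' or every mmap fd
	 * reported POLLERR/POLLHUP ('draining'), i.e. the workload is gone.
	 */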

	trigger_off(&auxtrace_snapshot_trigger);
	trigger_off(&switch_output_trigger);

	if (opts->auxtrace_snapshot_on_exit)
		record__auxtrace_snapshot_exit(rec);

	if (forks && workload_exec_errno) {
		char msg[STRERR_BUFSIZE];
		const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
		pr_err("Workload failed: %s\n", emsg);
		err = -1;
		goto out_child;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

	if (target__none(&rec->opts.target))
		record__synthesize_workload(rec, true);

out_child:
	record__mmap_read_all(rec, true);
	record__aio_mmap_read_sync(rec);

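	/*
	 * bytes_transferred is the original (uncompressed) size and
	 * bytes_compressed the on-disk size, so ratio > 1 means the
	 * compression helped; the + 0.5 below rounds to the nearest
	 * integer when storing the ratio in the (integer) header env field.
	 */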
	if (rec->session->bytes_transferred && rec->session->bytes_compressed) {
		ratio = (float)rec->session->bytes_transferred/(float)rec->session->bytes_compressed;
		session->header.env.comp_ratio = ratio + 0.5;
	}

	if (forks) {
		int exit_status;

		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&exit_status);

		if (err < 0)
			status = err;
		else if (WIFEXITED(exit_status))
			status = WEXITSTATUS(exit_status);
		else if (WIFSIGNALED(exit_status))
			signr = WTERMSIG(exit_status);
	} else
		status = err;

	record__synthesize(rec, true);
	/* this will be recalculated during process_buildids() */
	rec->samples = 0;

	if (!err) {
		if (!rec->timestamp_filename) {
			record__finish_output(rec);
		} else {
			fd = record__switch_output(rec, true);
			if (fd < 0) {
				status = fd;
				goto out_delete_session;
			}
		}
	}

	perf_hooks__invoke_record_end();

	if (!err && !quiet) {
		char samples[128];
		const char *postfix = rec->timestamp_filename ?
					".<timestamp>" : "";

		if (rec->samples && !rec->opts.full_auxtrace)
			scnprintf(samples, sizeof(samples),
				  " (%" PRIu64 " samples)", rec->samples);
		else
			samples[0] = '\0';

		fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s",
			perf_data__size(data) / 1024.0 / 1024.0,
			data->path, postfix, samples);
		if (ratio) {
			fprintf(stderr, ", compressed (original %.3f MB, ratio is %.3f)",
				rec->session->bytes_transferred / 1024.0 / 1024.0,
				ratio);
		}
		fprintf(stderr, " ]\n");
	}

out_delete_session:
	zstd_fini(&session->zstd_data);
	perf_session__delete(session);

	if (!opts->no_bpf_event)
		perf_evlist__stop_sb_thread(sb_evlist);
	return status;
}

static void callchain_debug(struct callchain_param *callchain)
{
	static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };

	pr_debug("callchain: type %s\n", str[callchain->record_mode]);

	if (callchain->record_mode == CALLCHAIN_DWARF)
		pr_debug("callchain: stack dump size %d\n",
			 callchain->dump_size);
}

int record_opts__parse_callchain(struct record_opts *record,
				 struct callchain_param *callchain,
				 const char *arg, bool unset)
{
	int ret;
	callchain->enabled = !unset;

	/* --no-call-graph */
	if (unset) {
		callchain->record_mode = CALLCHAIN_NONE;
		pr_debug("callchain: disabled\n");
		return 0;
	}

	ret = parse_callchain_record_opt(arg, callchain);
	if (!ret) {
		/* Enable data address sampling for DWARF unwind. */
		if (callchain->record_mode == CALLCHAIN_DWARF)
			record->sample_address = true;
		callchain_debug(callchain);
	}

	return ret;
}
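
/*
 * Illustrative invocations (values are examples, not defaults):
 *
 *   perf record --call-graph fp ...          # frame-pointer unwinding
 *   perf record --call-graph dwarf,8192 ...  # DWARF, 8kB of stack per sample
 *   perf record --call-graph lbr ...         # last-branch-record assisted
 *
 * The "record_mode[,record_size]" string is handed to
 * parse_callchain_record_opt() above; DWARF mode additionally turns on
 * address sampling, which the unwinder needs.
 */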

int record_parse_callchain_opt(const struct option *opt,
			       const char *arg,
			       int unset)
{
	return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
}

int record_callchain_opt(const struct option *opt,
			 const char *arg __maybe_unused,
			 int unset __maybe_unused)
{
	struct callchain_param *callchain = opt->value;

	callchain->enabled = true;

	if (callchain->record_mode == CALLCHAIN_NONE)
		callchain->record_mode = CALLCHAIN_FP;

	callchain_debug(callchain);
	return 0;
}
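
/*
 * Note the split (as wired up in __record_options[] below): bare -g goes
 * through record_callchain_opt() and just enables the default FP mode,
 * while --call-graph takes an argument and goes through
 * record_parse_callchain_opt().
 */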

static int perf_record_config(const char *var, const char *value, void *cb)
{
	struct record *rec = cb;

	if (!strcmp(var, "record.build-id")) {
		if (!strcmp(value, "cache"))
			rec->no_buildid_cache = false;
		else if (!strcmp(value, "no-cache"))
			rec->no_buildid_cache = true;
		else if (!strcmp(value, "skip"))
			rec->no_buildid = true;
		else
			return -1;
		return 0;
	}
	if (!strcmp(var, "record.call-graph")) {
		var = "call-graph.record-mode";
		return perf_default_config(var, value, cb);
	}
#ifdef HAVE_AIO_SUPPORT
	if (!strcmp(var, "record.aio")) {
		rec->opts.nr_cblocks = strtol(value, NULL, 0);
		if (!rec->opts.nr_cblocks)
			rec->opts.nr_cblocks = nr_cblocks_default;
	}
#endif

	return 0;
}
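
/*
 * A minimal ~/.perfconfig sketch exercising the keys handled above
 * (values are illustrative):
 *
 *   [record]
 *       build-id = cache
 *       call-graph = dwarf
 *       aio = 4
 *
 * "record.call-graph" is simply rewritten to "call-graph.record-mode"
 * and handed back to perf_default_config().
 */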

struct clockid_map {
	const char *name;
	int clockid;
};

#define CLOCKID_MAP(n, c)	\
	{ .name = n, .clockid = (c), }

#define CLOCKID_END	{ .name = NULL, }


/*
 * Add the missing ones, we need to build on many distros...
 */
#ifndef CLOCK_MONOTONIC_RAW
#define CLOCK_MONOTONIC_RAW 4
#endif
#ifndef CLOCK_BOOTTIME
#define CLOCK_BOOTTIME 7
#endif
#ifndef CLOCK_TAI
#define CLOCK_TAI 11
#endif

static const struct clockid_map clockids[] = {
	/* available for all events, NMI safe */
	CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
	CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),

	/* available for some events */
	CLOCKID_MAP("realtime", CLOCK_REALTIME),
	CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
	CLOCKID_MAP("tai", CLOCK_TAI),

	/* available for the lazy */
	CLOCKID_MAP("mono", CLOCK_MONOTONIC),
	CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
	CLOCKID_MAP("real", CLOCK_REALTIME),
	CLOCKID_MAP("boot", CLOCK_BOOTTIME),

	CLOCKID_END,
};

static int get_clockid_res(clockid_t clk_id, u64 *res_ns)
{
	struct timespec res;

	*res_ns = 0;
	if (!clock_getres(clk_id, &res))
		*res_ns = res.tv_nsec + res.tv_sec * NSEC_PER_SEC;
	else
		pr_warning("WARNING: Failed to determine specified clock resolution.\n");

	return 0;
}

static int parse_clockid(const struct option *opt, const char *str, int unset)
{
	struct record_opts *opts = (struct record_opts *)opt->value;
	const struct clockid_map *cm;
	const char *ostr = str;

	if (unset) {
		opts->use_clockid = 0;
		return 0;
	}

	/* no arg passed */
	if (!str)
		return 0;

	/* no setting it twice */
	if (opts->use_clockid)
		return -1;

	opts->use_clockid = true;

	/* if it's a number, we're done */
	if (sscanf(str, "%d", &opts->clockid) == 1)
		return get_clockid_res(opts->clockid, &opts->clockid_res_ns);

	/* allow a "CLOCK_" prefix to the name */
	if (!strncasecmp(str, "CLOCK_", 6))
		str += 6;

	for (cm = clockids; cm->name; cm++) {
		if (!strcasecmp(str, cm->name)) {
			opts->clockid = cm->clockid;
			return get_clockid_res(opts->clockid,
					       &opts->clockid_res_ns);
		}
	}

	opts->use_clockid = false;
	ui__warning("unknown clockid %s, check man page\n", ostr);
	return -1;
}
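
/*
 * Equivalent spellings accepted by the parser above (examples):
 *
 *   perf record -k monotonic_raw ...
 *   perf record -k CLOCK_MONOTONIC_RAW ...   # "CLOCK_" prefix is stripped
 *   perf record -k 4 ...                     # raw clockid number
 *
 * All three resolve to the same clockid; the clock's resolution is also
 * captured for the header via get_clockid_res().
 */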

static int record__parse_affinity(const struct option *opt, const char *str, int unset)
{
	struct record_opts *opts = (struct record_opts *)opt->value;

	if (unset || !str)
		return 0;

	if (!strcasecmp(str, "node"))
		opts->affinity = PERF_AFFINITY_NODE;
	else if (!strcasecmp(str, "cpu"))
		opts->affinity = PERF_AFFINITY_CPU;

	return 0;
}

static int record__parse_mmap_pages(const struct option *opt,
				    const char *str,
				    int unset __maybe_unused)
{
	struct record_opts *opts = opt->value;
	char *s, *p;
	unsigned int mmap_pages;
	int ret;

	if (!str)
		return -EINVAL;

	s = strdup(str);
	if (!s)
		return -ENOMEM;

	p = strchr(s, ',');
	if (p)
		*p = '\0';

	if (*s) {
		ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
		if (ret)
			goto out_free;
		opts->mmap_pages = mmap_pages;
	}

	if (!p) {
		ret = 0;
		goto out_free;
	}

	ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
	if (ret)
		goto out_free;

	opts->auxtrace_mmap_pages = mmap_pages;

out_free:
	free(s);
	return ret;
}
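
/*
 * "-m pages[,pages]" splits at the comma (sizes below are illustrative):
 *
 *   perf record -m 512 ...      # 512 data pages, AUX size untouched
 *   perf record -m 512,128 ...  # 512 data pages, 128 AUX area pages
 *   perf record -m ,128 ...     # only the AUX area size is set
 */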

static void switch_output_size_warn(struct record *rec)
{
	u64 wakeup_size = perf_evlist__mmap_size(rec->opts.mmap_pages);
	struct switch_output *s = &rec->switch_output;

	wakeup_size /= 2;

	if (s->size < wakeup_size) {
		char buf[100];

		unit_number__scnprintf(buf, sizeof(buf), wakeup_size);
		pr_warning("WARNING: switch-output data size lower than "
			   "wakeup kernel buffer size (%s) "
			   "expect bigger perf.data sizes\n", buf);
	}
}

static int switch_output_setup(struct record *rec)
{
	struct switch_output *s = &rec->switch_output;
	static struct parse_tag tags_size[] = {
		{ .tag  = 'B', .mult = 1       },
		{ .tag  = 'K', .mult = 1 << 10 },
		{ .tag  = 'M', .mult = 1 << 20 },
		{ .tag  = 'G', .mult = 1 << 30 },
		{ .tag  = 0 },
	};
	static struct parse_tag tags_time[] = {
		{ .tag  = 's', .mult = 1        },
		{ .tag  = 'm', .mult = 60       },
		{ .tag  = 'h', .mult = 60*60    },
		{ .tag  = 'd', .mult = 60*60*24 },
		{ .tag  = 0 },
	};
	unsigned long val;

	if (!s->set)
		return 0;

	if (!strcmp(s->str, "signal")) {
		s->signal = true;
		pr_debug("switch-output with SIGUSR2 signal\n");
		goto enabled;
	}

	val = parse_tag_value(s->str, tags_size);
	if (val != (unsigned long) -1) {
		s->size = val;
		pr_debug("switch-output with %s size threshold\n", s->str);
		goto enabled;
	}

	val = parse_tag_value(s->str, tags_time);
	if (val != (unsigned long) -1) {
		s->time = val;
		pr_debug("switch-output with %s time threshold (%lu seconds)\n",
			 s->str, s->time);
		goto enabled;
	}

	return -1;

enabled:
	rec->timestamp_filename = true;
	s->enabled              = true;

	if (s->size && !rec->opts.no_buffering)
		switch_output_size_warn(rec);

	return 0;
}
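
/*
 * The three accepted forms, in the order they are tried above
 * (thresholds are examples):
 *
 *   perf record --switch-output ...       # rotate on SIGUSR2 (the default)
 *   perf record --switch-output=100M ...  # rotate every ~100MB written
 *   perf record --switch-output=30s ...   # rotate every 30 seconds
 *
 * Any of them implies timestamped output file names.
 */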

static const char * const __record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};
const char * const *record_usage = __record_usage;

/*
 * XXX Ideally would be local to cmd_record() and passed to a record__new
 * because we need to have access to it in record__exit, which is called
 * after cmd_record() exits, but since record_options need to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't touch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct record record = {
	.opts = {
		.sample_time	     = true,
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
			.default_per_cpu = true,
		},
		.mmap_flush          = MMAP_FLUSH_DEFAULT,
	},
	.tool = {
		.sample		= process_sample_event,
		.fork		= perf_event__process_fork,
		.exit		= perf_event__process_exit,
		.comm		= perf_event__process_comm,
		.namespaces	= perf_event__process_namespaces,
		.mmap		= perf_event__process_mmap,
		.mmap2		= perf_event__process_mmap2,
		.ordered_events	= true,
	},
};

const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
	"\n\t\t\t\tDefault: fp";

static bool dry_run;

/*
 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
 * with it and switch to use the library functions in perf_evlist that came
 * from builtin-record.c, i.e. use record_opts,
 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
 * using pipes, etc.
 */
static struct option __record_options[] = {
	OPT_CALLBACK('e', "event", &record.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
		     "event filter", parse_filter),
	OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
			   NULL, "don't record events from perf itself",
			   exclude_perf),
	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
		   "record events on existing process id"),
	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
		   "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
		    "collect data without buffering"),
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
		   "list of cpus to monitor"),
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.data.path, "file",
		   "output file name"),
	OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
			&record.opts.no_inherit_set,
			"child tasks do not inherit counters"),
	OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
		    "synthesize non-sample events at the end of output"),
	OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
	OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "record bpf events"),
	OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
		    "Fail if the specified frequency can't be used"),
	OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
		     "profile at this frequency",
		     record__parse_freq),
	OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
		     "number of mmap data pages and AUX area tracing mmap pages",
		     record__parse_mmap_pages),
	OPT_CALLBACK(0, "mmap-flush", &record.opts, "number",
		     "Minimal number of bytes that is extracted from mmap data pages (default: 1)",
		     record__mmap_flush_parse),
	OPT_BOOLEAN(0, "group", &record.opts.group,
		    "put the counters into a counter group"),
	OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
			   NULL, "enables call-graph recording",
			   &record_callchain_opt),
	OPT_CALLBACK(0, "call-graph", &record.opts,
		     "record_mode[,record_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
	OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
		    "Record the sample physical addresses"),
	OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
	OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
			&record.opts.sample_time_set,
			"Record the sample timestamps"),
	OPT_BOOLEAN_SET('P', "period", &record.opts.period, &record.opts.period_set,
			"Record the sample period"),
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
		    "don't sample"),
	OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
			&record.no_buildid_cache_set,
			"do not update the buildid cache"),
	OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
			&record.no_buildid_set,
			"do not collect buildids in perf.data"),
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
		     "ms to wait before starting measurement after program start"),
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
		   "user to profile"),

	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
			   "branch any", "sample any taken branches",
			   parse_branch_stack),

	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
		    "sample by weight (on special events only)"),
	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
		    "sample transaction flags (special events only)"),
	OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
		    "use per-thread mmaps"),
	OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
			    "sample selected machine registers on interrupt,"
			    " use '-I?' to list register names", parse_intr_regs),
	OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
			    "sample selected machine registers on interrupt,"
			    " use '--user-regs=?' to list register names", parse_user_regs),
	OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
		    "Record running/enabled time of read (:S) events"),
	OPT_CALLBACK('k', "clockid", &record.opts,
		     "clockid", "clockid to use for events, see clock_gettime()",
		     parse_clockid),
	OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
			  "opts", "AUX area tracing Snapshot Mode", ""),
	OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
		     "per thread proc mmap processing timeout in ms"),
	OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
		    "Record namespaces events"),
	OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
		    "Record context switch events"),
	OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
			 "Configure all used events to run in kernel space.",
			 PARSE_OPT_EXCLUSIVE),
	OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
			 "Configure all used events to run in user space.",
			 PARSE_OPT_EXCLUSIVE),
	OPT_BOOLEAN(0, "kernel-callchains", &record.opts.kernel_callchains,
		    "collect kernel callchains"),
	OPT_BOOLEAN(0, "user-callchains", &record.opts.user_callchains,
		    "collect user callchains"),
	OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
		   "clang binary to use for compiling BPF scriptlets"),
	OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
		   "options passed to clang when compiling BPF scriptlets"),
	OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
		    "Record build-id of all DSOs regardless of hits"),
	OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
		    "append timestamp to output filename"),
	OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
		    "Record timestamp boundary (time of first/last samples)"),
	OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
			      &record.switch_output.set, "signal or size[BKMG] or time[smhd]",
			      "Switch output when receiving SIGUSR2 (signal) or cross a size or time threshold",
			      "signal"),
	OPT_INTEGER(0, "switch-max-files", &record.switch_output.num_files,
		    "Limit number of switch output generated files"),
	OPT_BOOLEAN(0, "dry-run", &dry_run,
		    "Parse options then exit"),
#ifdef HAVE_AIO_SUPPORT
	OPT_CALLBACK_OPTARG(0, "aio", &record.opts,
			    &nr_cblocks_default, "n", "Use <n> control blocks in asynchronous trace writing mode (default: 1, max: 4)",
			    record__aio_parse),
#endif
	OPT_CALLBACK(0, "affinity", &record.opts, "node|cpu",
		     "Set affinity mask of trace reading thread to NUMA node cpu mask or cpu of processed mmap buffer",
		     record__parse_affinity),
#ifdef HAVE_ZSTD_SUPPORT
	OPT_CALLBACK_OPTARG('z', "compression-level", &record.opts, &comp_level_default,
			    "n", "Compressed records using specified level (default: 1 - fastest compression, 22 - greatest compression)",
			    record__parse_comp_level),
#endif
	OPT_END()
};
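
/*
 * A few representative invocations tying the options above together
 * (workload names and values are hypothetical):
 *
 *   perf record -F 4000 -g -- ./myprog       # 4kHz sampling + callchains
 *   perf record -a --switch-events sleep 10  # system-wide, context switches
 *   perf record -p 1234 -o app.data          # attach to an existing pid
 */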

struct option *record_options = __record_options;

int cmd_record(int argc, const char **argv)
{
	int err;
	struct record *rec = &record;
	char errbuf[BUFSIZ];

	setlocale(LC_ALL, "");

#ifndef HAVE_LIBBPF_SUPPORT
# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
	set_nobuild('\0', "clang-path", true);
	set_nobuild('\0', "clang-opt", true);
# undef set_nobuild
#endif

#ifndef HAVE_BPF_PROLOGUE
# if !defined (HAVE_DWARF_SUPPORT)
#  define REASON  "NO_DWARF=1"
# elif !defined (HAVE_LIBBPF_SUPPORT)
#  define REASON  "NO_LIBBPF=1"
# else
#  define REASON  "this architecture doesn't support BPF prologue"
# endif
# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
	set_nobuild('\0', "vmlinux", true);
# undef set_nobuild
# undef REASON
#endif

	CPU_ZERO(&rec->affinity_mask);
	rec->opts.affinity = PERF_AFFINITY_SYS;

	rec->evlist = evlist__new();
	if (rec->evlist == NULL)
		return -ENOMEM;

	err = perf_config(perf_record_config, rec);
	if (err)
		return err;

	argc = parse_options(argc, argv, record_options, record_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (quiet)
		perf_quiet_option();

	/* Make system wide (-a) the default target. */
	if (!argc && target__none(&rec->opts.target))
		rec->opts.target.system_wide = true;

	if (nr_cgroups && !rec->opts.target.system_wide) {
		usage_with_options_msg(record_usage, record_options,
			"cgroup monitoring only available in system-wide mode");

	}

	if (rec->opts.comp_level != 0) {
		pr_debug("Compression enabled, disabling build id collection at the end of the session.\n");
		rec->no_buildid = true;
	}

	if (rec->opts.record_switch_events &&
	    !perf_can_record_switch_events()) {
		ui__error("kernel does not support recording context switch events\n");
		parse_options_usage(record_usage, record_options, "switch-events", 0);
		return -EINVAL;
	}

	if (switch_output_setup(rec)) {
		parse_options_usage(record_usage, record_options, "switch-output", 0);
		return -EINVAL;
	}

	if (rec->switch_output.time) {
		signal(SIGALRM, alarm_sig_handler);
		alarm(rec->switch_output.time);
	}

	if (rec->switch_output.num_files) {
		rec->switch_output.filenames = calloc(sizeof(char *),
						      rec->switch_output.num_files);
		if (!rec->switch_output.filenames)
			return -EINVAL;
	}

	/*
	 * Allow aliases to facilitate the lookup of symbols for address
	 * filters. Refer to auxtrace_parse_filters().
	 */
	symbol_conf.allow_aliases = true;

	symbol__init(NULL);

	err = record__auxtrace_init(rec);
	if (err)
		goto out;

	if (dry_run)
		goto out;

	err = bpf__setup_stdout(rec->evlist);
	if (err) {
		bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
		pr_err("ERROR: Setup BPF stdout failed: %s\n",
		       errbuf);
		goto out;
	}

	err = -ENOMEM;

	if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(rec->evlist))
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");

	if (rec->no_buildid_cache || rec->no_buildid) {
		disable_buildid_cache();
	} else if (rec->switch_output.enabled) {
		/*
		 * In 'perf record --switch-output', disable buildid
		 * generation by default to reduce data file switching
		 * overhead. Still generate buildids if they are explicitly
		 * required, using
		 *
		 *  perf record --switch-output --no-no-buildid \
		 *		--no-no-buildid-cache
		 *
		 * The following code is equivalent to:
		 *
		 * if ((rec->no_buildid || !rec->no_buildid_set) &&
		 *     (rec->no_buildid_cache || !rec->no_buildid_cache_set))
		 *         disable_buildid_cache();
		 */
		bool disable = true;

		if (rec->no_buildid_set && !rec->no_buildid)
			disable = false;
		if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
			disable = false;
		if (disable) {
			rec->no_buildid = true;
			rec->no_buildid_cache = true;
			disable_buildid_cache();
		}
	}

	if (record.opts.overwrite)
		record.opts.tail_synthesize = true;

	if (rec->evlist->core.nr_entries == 0 &&
	    __perf_evlist__add_default(rec->evlist, !record.opts.no_samples) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out;
	}

	if (rec->opts.target.tid && !rec->opts.no_inherit_set)
		rec->opts.no_inherit = true;

	err = target__validate(&rec->opts.target);
	if (err) {
		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s\n", errbuf);
	}

	err = target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;

		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out;
	}

	/* Enable ignoring missing threads when -u/-p option is defined. */
	rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;

	err = -ENOMEM;
	if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
	if (err)
		goto out;

	/*
	 * We take all buildids when the file contains AUX area tracing
	 * data, because we do not decode the trace; that would take
	 * too long.
	 */
	if (rec->opts.full_auxtrace)
		rec->buildid_all = true;

	if (record_opts__config(&rec->opts)) {
		err = -EINVAL;
		goto out;
	}

	if (rec->opts.nr_cblocks > nr_cblocks_max)
		rec->opts.nr_cblocks = nr_cblocks_max;
	pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks);

	pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
	pr_debug("mmap flush: %d\n", rec->opts.mmap_flush);

	if (rec->opts.comp_level > comp_level_max)
		rec->opts.comp_level = comp_level_max;
	pr_debug("comp level: %d\n", rec->opts.comp_level);

	err = __cmd_record(&record, argc, argv);
out:
	evlist__delete(rec->evlist);
	symbol__exit();
	auxtrace_record__free(rec->itr);
	return err;
}

static void snapshot_sig_handler(int sig __maybe_unused)
{
	struct record *rec = &record;

	if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
		trigger_hit(&auxtrace_snapshot_trigger);
		auxtrace_record__snapshot_started = 1;
		if (auxtrace_record__snapshot_start(record.itr))
			trigger_error(&auxtrace_snapshot_trigger);
	}

	if (switch_output_signal(rec))
		trigger_hit(&switch_output_trigger);
}

static void alarm_sig_handler(int sig __maybe_unused)
{
	struct record *rec = &record;

	if (switch_output_time(rec))
		trigger_hit(&switch_output_trigger);
}