blob: 1e1f97139f16e65fcb6b56e5e3bea3f7803c50ba [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Ingo Molnarabaff322009-06-02 22:59:57 +02002/*
Ingo Molnarbf9e1872009-06-02 23:37:05 +02003 * builtin-record.c
4 *
5 * Builtin record command: Record the profile of a workload
6 * (or a CPU, or a PID) into the perf.data output file - for
7 * later analysis via perf report.
Ingo Molnarabaff322009-06-02 22:59:57 +02008 */
Ingo Molnar16f762a2009-05-27 09:10:38 +02009#include "builtin.h"
Ingo Molnarbf9e1872009-06-02 23:37:05 +020010
Arnaldo Carvalho de Melo6122e4e2010-02-03 16:52:05 -020011#include "util/build-id.h"
Josh Poimboeuf4b6ab942015-12-15 09:39:39 -060012#include <subcmd/parse-options.h>
Ingo Molnar8ad8db32009-05-26 11:10:09 +020013#include "util/parse-events.h"
Taeung Song41840d22016-06-23 17:55:17 +090014#include "util/config.h"
Thomas Gleixner6eda5832009-05-01 18:29:57 +020015
Arnaldo Carvalho de Melo8f651ea2014-10-09 16:12:24 -030016#include "util/callchain.h"
Arnaldo Carvalho de Melof14d5702014-10-17 12:17:40 -030017#include "util/cgroup.h"
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020018#include "util/header.h"
Frederic Weisbecker66e274f2009-08-12 11:07:25 +020019#include "util/event.h"
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -020020#include "util/evlist.h"
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -020021#include "util/evsel.h"
Frederic Weisbecker8f288272009-08-16 22:05:48 +020022#include "util/debug.h"
Arnaldo Carvalho de Meloaeb00b12019-08-22 15:40:29 -030023#include "util/target.h"
Arnaldo Carvalho de Melo94c744b2009-12-11 21:24:02 -020024#include "util/session.h"
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -020025#include "util/tool.h"
Arnaldo Carvalho de Melo8d063672009-11-04 18:50:43 -020026#include "util/symbol.h"
Arnaldo Carvalho de Meloaeb00b12019-08-22 15:40:29 -030027#include "util/record.h"
Paul Mackerrasa12b51c2010-03-10 20:36:09 +110028#include "util/cpumap.h"
Arnaldo Carvalho de Melofd782602011-01-18 15:15:24 -020029#include "util/thread_map.h"
Jiri Olsaf5fc14122013-10-15 16:27:32 +020030#include "util/data.h"
Stephane Eranianbcc84ec2015-08-31 18:41:12 +020031#include "util/perf_regs.h"
Adrian Hunteref149c22015-04-09 18:53:45 +030032#include "util/auxtrace.h"
Adrian Hunter46bc29b2016-03-08 10:38:44 +020033#include "util/tsc.h"
Andi Kleenf00898f2015-05-27 10:51:51 -070034#include "util/parse-branch-options.h"
Stephane Eranianbcc84ec2015-08-31 18:41:12 +020035#include "util/parse-regs-options.h"
Wang Nan71dc23262015-10-14 12:41:19 +000036#include "util/llvm-utils.h"
Wang Nan8690a2a2016-02-22 09:10:32 +000037#include "util/bpf-loader.h"
Wang Nan5f9cf592016-04-20 18:59:49 +000038#include "util/trigger.h"
Wang Nana0748652016-11-26 07:03:28 +000039#include "util/perf-hooks.h"
Alexey Budankovf13de662019-01-22 20:50:57 +030040#include "util/cpu-set-sched.h"
Arnaldo Carvalho de Meloea49e012019-09-18 11:36:13 -030041#include "util/synthetic-events.h"
Arnaldo Carvalho de Meloc5e40272017-04-19 16:12:39 -030042#include "util/time-utils.h"
Arnaldo Carvalho de Melo58db1d62017-04-19 16:05:56 -030043#include "util/units.h"
Song Liu7b612e22019-01-17 08:15:19 -080044#include "util/bpf-event.h"
Wang Nand8871ea2016-02-26 09:32:06 +000045#include "asm/bug.h"
Arnaldo Carvalho de Meloc1a604d2019-08-29 15:20:59 -030046#include "perf.h"
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020047
Arnaldo Carvalho de Meloa43783a2017-04-18 10:46:11 -030048#include <errno.h>
Arnaldo Carvalho de Melofd20e812017-04-17 15:23:08 -030049#include <inttypes.h>
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -030050#include <locale.h>
Arnaldo Carvalho de Melo42087352017-04-19 19:06:30 -030051#include <poll.h>
Peter Zijlstra97124d5e2009-06-02 15:52:24 +020052#include <unistd.h>
Peter Zijlstrade9ac072009-04-08 15:01:31 +020053#include <sched.h>
Arnaldo Carvalho de Melo9607ad32017-04-19 15:49:18 -030054#include <signal.h>
Arnaldo Carvalho de Meloa41794c2010-05-18 18:29:23 -030055#include <sys/mman.h>
Arnaldo Carvalho de Melo42087352017-04-19 19:06:30 -030056#include <sys/wait.h>
Mamatha Inamdar6ef81c52019-08-22 12:50:49 +053057#include <linux/err.h>
Arnaldo Carvalho de Melo8520a982019-08-29 16:18:59 -030058#include <linux/string.h>
Arnaldo Carvalho de Melo0693e682016-08-08 15:05:46 -030059#include <linux/time64.h>
Arnaldo Carvalho de Melod8f9da22019-07-04 12:06:20 -030060#include <linux/zalloc.h>
Bernhard Rosenkraenzer78da39f2012-10-08 09:43:26 +030061
/*
 * State for the --switch-output option: rotate the perf.data output
 * file on a signal, after a given amount of written data, or on a
 * time period (see switch_output_signal/size/time() below).
 */
struct switch_output {
	bool		 enabled;	/* some switch-output mode was requested */
	bool		 signal;	/* rotate when the switch trigger fires on a signal */
	unsigned long	 size;		/* rotate once bytes_written reaches this (0 = off) */
	unsigned long	 time;		/* time-based rotation period (0 = off) */
	const char	*str;		/* raw option argument as given on the command line */
	bool		 set;		/* option was present on the command line */
	char		 **filenames;	/* ring of previously generated output file names */
	int		 num_files;	/* capacity of filenames[] */
	int		 cur_file;	/* index into filenames[] currently in use */
};
73
/*
 * Everything the record session carries around: the tool callbacks,
 * parsed options, output handle and bookkeeping counters.
 */
struct record {
	struct perf_tool	tool;		/* event-processing callbacks */
	struct record_opts	opts;		/* parsed command-line options */
	u64			bytes_written;	/* payload written so far; drives switch_output_size() */
	struct perf_data	data;		/* perf.data output handle */
	struct auxtrace_record	*itr;		/* AUX area tracing state, NULL unless initialized */
	struct evlist		*evlist;	/* events being recorded */
	struct perf_session	*session;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_set;		/* no_buildid explicitly set (cmdline/config) */
	bool			no_buildid_cache;
	bool			no_buildid_cache_set;	/* no_buildid_cache explicitly set */
	bool			buildid_all;
	bool			timestamp_filename;
	bool			timestamp_boundary;
	struct switch_output	switch_output;	/* --switch-output state, see above */
	unsigned long long	samples;	/* number of mmap reads that yielded data */
	cpu_set_t		affinity_mask;
};
Ingo Molnara21ca2c2009-06-06 09:58:57 +020094
/* Nonzero once an AUX area snapshot has been started but not yet read back. */
static volatile int auxtrace_record__snapshot_started;
/* Coordinates AUX area snapshot requests between signal and main paths. */
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
/* Coordinates --switch-output file rotation requests. */
static DEFINE_TRIGGER(switch_output_trigger);

/* Human-readable names for the --affinity modes, indexed by PERF_AFFINITY_*. */
static const char *affinity_tags[PERF_AFFINITY_MAX] = {
	"SYS", "NODE", "CPU"
};
102
Jiri Olsadc0c6122017-01-09 10:51:58 +0100103static bool switch_output_signal(struct record *rec)
104{
105 return rec->switch_output.signal &&
106 trigger_is_ready(&switch_output_trigger);
107}
108
109static bool switch_output_size(struct record *rec)
110{
111 return rec->switch_output.size &&
112 trigger_is_ready(&switch_output_trigger) &&
113 (rec->bytes_written >= rec->switch_output.size);
114}
115
Jiri Olsabfacbe32017-01-09 10:52:00 +0100116static bool switch_output_time(struct record *rec)
117{
118 return rec->switch_output.time &&
119 trigger_is_ready(&switch_output_trigger);
120}
121
Jiri Olsaded2b8f2018-09-13 14:54:06 +0200122static int record__write(struct record *rec, struct perf_mmap *map __maybe_unused,
123 void *bf, size_t size)
Peter Zijlstraf5970552009-06-18 23:22:55 +0200124{
Jiri Olsaded2b8f2018-09-13 14:54:06 +0200125 struct perf_data_file *file = &rec->session->data->file;
126
127 if (perf_data_file__write(file, bf, size) < 0) {
Jiri Olsa50a9b862013-11-22 13:11:24 +0100128 pr_err("failed to write perf data, error: %m\n");
129 return -1;
Peter Zijlstraf5970552009-06-18 23:22:55 +0200130 }
David Ahern8d3eca22012-08-26 12:24:47 -0600131
Arnaldo Carvalho de Melocf8b2e62013-12-19 14:26:26 -0300132 rec->bytes_written += size;
Jiri Olsadc0c6122017-01-09 10:51:58 +0100133
134 if (switch_output_size(rec))
135 trigger_hit(&switch_output_trigger);
136
David Ahern8d3eca22012-08-26 12:24:47 -0600137 return 0;
Peter Zijlstraf5970552009-06-18 23:22:55 +0200138}
139
/*
 * Forward declarations: the AIO write path below needs these helpers,
 * which are defined later in this file.
 */
static int record__aio_enabled(struct record *rec);
static int record__comp_enabled(struct record *rec);
static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
			    void *src, size_t src_size);
144
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300145#ifdef HAVE_AIO_SUPPORT
/*
 * Queue one asynchronous write of @size bytes from @buf to @trace_fd at
 * offset @off, retrying while the kernel reports EAGAIN.  On any other
 * failure the control block is invalidated (aio_fildes = -1).
 * Returns aio_write()'s result: 0 if queued, -1 on error.
 */
static int record__aio_write(struct aiocb *cblock, int trace_fd,
			     void *buf, size_t size, off_t off)
{
	int rc;

	cblock->aio_fildes = trace_fd;
	cblock->aio_buf = buf;
	cblock->aio_nbytes = size;
	cblock->aio_offset = off;
	cblock->aio_sigevent.sigev_notify = SIGEV_NONE;

	for (;;) {
		rc = aio_write(cblock);
		if (rc == 0)
			break;
		if (errno != EAGAIN) {
			cblock->aio_fildes = -1;
			pr_err("failed to queue perf data, error: %m\n");
			break;
		}
	}

	return rc;
}
170
/*
 * Check one in-flight aio write on @md for completion.
 *
 * Returns 0 while the request is still in progress or after a short
 * write was restarted with the remainder; returns 1 when the request
 * fully completed and its control block is free for reuse.
 */
static int record__aio_complete(struct perf_mmap *md, struct aiocb *cblock)
{
	void *rem_buf;
	off_t rem_off;
	size_t rem_size;
	int rc, aio_errno;
	ssize_t aio_ret, written;

	aio_errno = aio_error(cblock);
	if (aio_errno == EINPROGRESS)
		return 0;

	/* Request finished: collect its final status and byte count. */
	written = aio_ret = aio_return(cblock);
	if (aio_ret < 0) {
		if (aio_errno != EINTR)
			pr_err("failed to write perf data, error: %m\n");
		written = 0;	/* treat a failed write as "nothing written" and retry all */
	}

	rem_size = cblock->aio_nbytes - written;

	if (rem_size == 0) {
		/* Mark the control block free for record__aio_sync(). */
		cblock->aio_fildes = -1;
		/*
		 * md->refcount is incremented in record__aio_pushfn() for
		 * every aio write request started in record__aio_push() so
		 * decrement it because the request is now complete.
		 */
		perf_mmap__put(md);
		rc = 1;
	} else {
		/*
		 * aio write request may require restart with the
		 * reminder if the kernel didn't write whole
		 * chunk at once.
		 */
		rem_off = cblock->aio_offset + written;
		rem_buf = (void *)(cblock->aio_buf + written);
		record__aio_write(cblock, cblock->aio_fildes,
				rem_buf, rem_size, rem_off);
		rc = 0;
	}

	return rc;
}
216
/*
 * Wait for asynchronous writes on @md.
 *
 * With sync_all == false, return the index of the first control block
 * that is free for reuse, suspending (1ms at a time) until one becomes
 * available.  With sync_all == true, keep going until every outstanding
 * request has completed.  Returns -1 once nothing is left in flight.
 */
static int record__aio_sync(struct perf_mmap *md, bool sync_all)
{
	struct aiocb **aiocb = md->aio.aiocb;
	struct aiocb *cblocks = md->aio.cblocks;
	struct timespec timeout = { 0, 1000 * 1000 * 1 }; /* 1ms */
	int i, do_suspend;

	do {
		do_suspend = 0;
		for (i = 0; i < md->aio.nr_cblocks; ++i) {
			/* aio_fildes == -1 marks a block that is free (or was never used). */
			if (cblocks[i].aio_fildes == -1 || record__aio_complete(md, &cblocks[i])) {
				if (sync_all)
					aiocb[i] = NULL;
				else
					return i;
			} else {
				/*
				 * Started aio write is not complete yet
				 * so it has to be waited before the
				 * next allocation.
				 */
				aiocb[i] = &cblocks[i];
				do_suspend = 1;
			}
		}
		if (!do_suspend)
			return -1;

		/* Block until at least one of the pending requests makes progress. */
		while (aio_suspend((const struct aiocb **)aiocb, md->aio.nr_cblocks, &timeout)) {
			if (!(errno == EAGAIN || errno == EINTR))
				pr_err("failed to sync perf data, error: %m\n");
		}
	} while (1);
}
251
/* Cursor passed through perf_mmap__push() into record__aio_pushfn(). */
struct record_aio {
	struct record	*rec;	/* owning record session */
	void		*data;	/* destination aio buffer (map->aio.data[idx]) */
	size_t		size;	/* bytes staged into data so far */
};
257
/*
 * perf_mmap__push() callback: stage one chunk of the kernel ring buffer
 * into the per-map aio buffer (compressing it when -z is enabled) so
 * the file write itself can happen asynchronously in record__aio_push().
 * Returns the number of bytes appended to aio->data.
 */
static int record__aio_pushfn(struct perf_mmap *map, void *to, void *buf, size_t size)
{
	struct record_aio *aio = to;

	/*
	 * map->base data pointed by buf is copied into free map->aio.data[] buffer
	 * to release space in the kernel buffer as fast as possible, calling
	 * perf_mmap__consume() from perf_mmap__push() function.
	 *
	 * That lets the kernel to proceed with storing more profiling data into
	 * the kernel buffer earlier than other per-cpu kernel buffers are handled.
	 *
	 * Coping can be done in two steps in case the chunk of profiling data
	 * crosses the upper bound of the kernel buffer. In this case we first move
	 * part of data from map->start till the upper bound and then the reminder
	 * from the beginning of the kernel buffer till the end of the data chunk.
	 */

	if (record__comp_enabled(aio->rec)) {
		/* Compress straight into the staging buffer past what is already there. */
		size = zstd_compress(aio->rec->session, aio->data + aio->size,
				     perf_mmap__mmap_len(map) - aio->size,
				     buf, size);
	} else {
		memcpy(aio->data + aio->size, buf, size);
	}

	if (!aio->size) {
		/*
		 * Increment map->refcount to guard map->aio.data[] buffer
		 * from premature deallocation because map object can be
		 * released earlier than aio write request started on
		 * map->aio.data[] buffer is complete.
		 *
		 * perf_mmap__put() is done at record__aio_complete()
		 * after started aio request completion or at record__aio_push()
		 * if the request failed to start.
		 */
		perf_mmap__get(map);
	}

	aio->size += size;

	return size;
}
302
/*
 * Drain @map into the output file at offset *off using POSIX AIO.
 * On success *off is advanced past the queued data and 0 is returned;
 * a positive return means there was no data, negative means error.
 */
static int record__aio_push(struct record *rec, struct perf_mmap *map, off_t *off)
{
	int ret, idx;
	int trace_fd = rec->session->data->file.fd;
	struct record_aio aio = { .rec = rec, .size = 0 };

	/*
	 * Call record__aio_sync() to wait till map->aio.data[] buffer
	 * becomes available after previous aio write operation.
	 */

	idx = record__aio_sync(map, false);
	aio.data = map->aio.data[idx];
	ret = perf_mmap__push(map, &aio, record__aio_pushfn);
	if (ret != 0) /* ret > 0 - no data, ret < 0 - error */
		return ret;

	rec->samples++;
	ret = record__aio_write(&(map->aio.cblocks[idx]), trace_fd, aio.data, aio.size, *off);
	if (!ret) {
		/* Request queued: account the bytes now, completion is async. */
		*off += aio.size;
		rec->bytes_written += aio.size;
		if (switch_output_size(rec))
			trigger_hit(&switch_output_trigger);
	} else {
		/*
		 * Decrement map->refcount incremented in record__aio_pushfn()
		 * back if record__aio_write() operation failed to start, otherwise
		 * map->refcount is decremented in record__aio_complete() after
		 * aio write operation finishes successfully.
		 */
		perf_mmap__put(map);
	}

	return ret;
}
339
340static off_t record__aio_get_pos(int trace_fd)
341{
342 return lseek(trace_fd, 0, SEEK_CUR);
343}
344
345static void record__aio_set_pos(int trace_fd, off_t pos)
346{
347 lseek(trace_fd, pos, SEEK_SET);
348}
349
350static void record__aio_mmap_read_sync(struct record *rec)
351{
352 int i;
Jiri Olsa63503db2019-07-21 13:23:52 +0200353 struct evlist *evlist = rec->evlist;
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300354 struct perf_mmap *maps = evlist->mmap;
355
Alexey Budankovef781122019-03-18 20:44:12 +0300356 if (!record__aio_enabled(rec))
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300357 return;
358
359 for (i = 0; i < evlist->nr_mmaps; i++) {
360 struct perf_mmap *map = &maps[i];
361
362 if (map->base)
Alexey Budankov93f20c02018-11-06 12:07:19 +0300363 record__aio_sync(map, true);
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300364 }
365}
366
/* --aio without a value uses one control block; at most 4 are allowed. */
static int nr_cblocks_default = 1;
static int nr_cblocks_max = 4;
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300369
370static int record__aio_parse(const struct option *opt,
Alexey Budankov93f20c02018-11-06 12:07:19 +0300371 const char *str,
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300372 int unset)
373{
374 struct record_opts *opts = (struct record_opts *)opt->value;
375
Alexey Budankov93f20c02018-11-06 12:07:19 +0300376 if (unset) {
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300377 opts->nr_cblocks = 0;
Alexey Budankov93f20c02018-11-06 12:07:19 +0300378 } else {
379 if (str)
380 opts->nr_cblocks = strtol(str, NULL, 0);
381 if (!opts->nr_cblocks)
382 opts->nr_cblocks = nr_cblocks_default;
383 }
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300384
385 return 0;
386}
#else /* HAVE_AIO_SUPPORT */

/*
 * Stubs used when perf is built without POSIX AIO support: pushing via
 * aio always fails (callers fall back to synchronous writes) and the
 * position helpers / sync hook are no-ops.
 */
static int nr_cblocks_max = 0;

static int record__aio_push(struct record *rec __maybe_unused, struct perf_mmap *map __maybe_unused,
			    off_t *off __maybe_unused)
{
	return -1;
}

static off_t record__aio_get_pos(int trace_fd __maybe_unused)
{
	return -1;
}

static void record__aio_set_pos(int trace_fd __maybe_unused, off_t pos __maybe_unused)
{
}

static void record__aio_mmap_read_sync(struct record *rec __maybe_unused)
{
}
#endif
409
410static int record__aio_enabled(struct record *rec)
411{
412 return rec->opts.nr_cblocks > 0;
413}
414
Alexey Budankov470530b2019-03-18 20:40:26 +0300415#define MMAP_FLUSH_DEFAULT 1
416static int record__mmap_flush_parse(const struct option *opt,
417 const char *str,
418 int unset)
419{
420 int flush_max;
421 struct record_opts *opts = (struct record_opts *)opt->value;
422 static struct parse_tag tags[] = {
423 { .tag = 'B', .mult = 1 },
424 { .tag = 'K', .mult = 1 << 10 },
425 { .tag = 'M', .mult = 1 << 20 },
426 { .tag = 'G', .mult = 1 << 30 },
427 { .tag = 0 },
428 };
429
430 if (unset)
431 return 0;
432
433 if (str) {
434 opts->mmap_flush = parse_tag_value(str, tags);
435 if (opts->mmap_flush == (int)-1)
436 opts->mmap_flush = strtol(str, NULL, 0);
437 }
438
439 if (!opts->mmap_flush)
440 opts->mmap_flush = MMAP_FLUSH_DEFAULT;
441
442 flush_max = perf_evlist__mmap_size(opts->mmap_pages);
443 flush_max /= 4;
444 if (opts->mmap_flush > flush_max)
445 opts->mmap_flush = flush_max;
446
447 return 0;
448}
449
#ifdef HAVE_ZSTD_SUPPORT
static unsigned int comp_level_default = 1;

/*
 * Option callback for -z/--compression-level[=n]: store the requested
 * zstd level, falling back to level 1 when the value is absent or
 * parses to zero.  Always returns 0.
 */
static int record__parse_comp_level(const struct option *opt, const char *str, int unset)
{
	struct record_opts *opts = opt->value;

	if (unset) {
		opts->comp_level = 0;
		return 0;
	}

	if (str)
		opts->comp_level = strtol(str, NULL, 0);
	if (!opts->comp_level)
		opts->comp_level = comp_level_default;

	return 0;
}
#endif
/* Upper bound for -z/--compression-level (22 presumably mirrors zstd's maximum). */
static unsigned int comp_level_max = 22;
Alexey Budankov42e1fd82019-03-18 20:41:33 +0300471static int record__comp_enabled(struct record *rec)
472{
473 return rec->opts.comp_level > 0;
474}
475
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -0200476static int process_synthesized_event(struct perf_tool *tool,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -0200477 union perf_event *event,
Irina Tirdea1d037ca2012-09-11 01:15:03 +0300478 struct perf_sample *sample __maybe_unused,
479 struct machine *machine __maybe_unused)
Arnaldo Carvalho de Melo234fbbf2009-10-26 19:23:18 -0200480{
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -0300481 struct record *rec = container_of(tool, struct record, tool);
Jiri Olsaded2b8f2018-09-13 14:54:06 +0200482 return record__write(rec, NULL, event, event->header.size);
Arnaldo Carvalho de Melo234fbbf2009-10-26 19:23:18 -0200483}
484
Jiri Olsaded2b8f2018-09-13 14:54:06 +0200485static int record__pushfn(struct perf_mmap *map, void *to, void *bf, size_t size)
Arnaldo Carvalho de Melod37f1582017-10-05 16:39:55 -0300486{
487 struct record *rec = to;
488
Alexey Budankov5d7f4112019-03-18 20:43:35 +0300489 if (record__comp_enabled(rec)) {
490 size = zstd_compress(rec->session, map->data, perf_mmap__mmap_len(map), bf, size);
491 bf = map->data;
492 }
493
Arnaldo Carvalho de Melod37f1582017-10-05 16:39:55 -0300494 rec->samples++;
Jiri Olsaded2b8f2018-09-13 14:54:06 +0200495 return record__write(rec, map, bf, size);
Arnaldo Carvalho de Melod37f1582017-10-05 16:39:55 -0300496}
497
static volatile int done;
static volatile int signr = -1;
static volatile int child_finished;

/*
 * Common signal handler: note which signal arrived (SIGCHLD only flags
 * workload exit; anything else is remembered for re-raising at exit)
 * and ask the main loop to stop.
 */
static void sig_handler(int sig)
{
	switch (sig) {
	case SIGCHLD:
		child_finished = 1;
		break;
	default:
		signr = sig;
		break;
	}

	done = 1;
}
511
/*
 * SIGSEGV handler: let registered perf hooks restore any state they
 * changed, then dump a stack trace for the crash.
 */
static void sigsegv_handler(int sig)
{
	perf_hooks__recover();
	sighandler_dump_stack(sig);
}
517
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300518static void record__sig_exit(void)
519{
520 if (signr == -1)
521 return;
522
523 signal(signr, SIG_DFL);
524 raise(signr);
525}
526
Adrian Huntere31f0d02015-04-30 17:37:27 +0300527#ifdef HAVE_AUXTRACE_SUPPORT
528
/*
 * Write one AUX area tracing event plus its (possibly wrapped) payload
 * to the output.  data1/len1 and data2/len2 are the two segments of the
 * payload when it wraps around the AUX ring buffer; the total is padded
 * to an 8-byte boundary.  For seekable output the event's file offset
 * is also recorded in the session's auxtrace index.
 */
static int record__process_auxtrace(struct perf_tool *tool,
				    struct perf_mmap *map,
				    union perf_event *event, void *data1,
				    size_t len1, void *data2, size_t len2)
{
	struct record *rec = container_of(tool, struct record, tool);
	struct perf_data *data = &rec->data;
	size_t padding;
	u8 pad[8] = {0};

	if (!perf_data__is_pipe(data) && !perf_data__is_dir(data)) {
		off_t file_offset;
		int fd = perf_data__fd(data);
		int err;

		/* Index the event by where it is about to land in the file. */
		file_offset = lseek(fd, 0, SEEK_CUR);
		if (file_offset == -1)
			return -1;
		err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
						     event, file_offset);
		if (err)
			return err;
	}

	/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
	padding = (len1 + len2) & 7;
	if (padding)
		padding = 8 - padding;

	record__write(rec, map, event, event->header.size);
	record__write(rec, map, data1, len1);
	if (len2)
		record__write(rec, map, data2, len2);
	record__write(rec, map, &pad, padding);

	return 0;
}
566
567static int record__auxtrace_mmap_read(struct record *rec,
Jiri Olsae035f4c2018-09-13 14:54:05 +0200568 struct perf_mmap *map)
Adrian Hunteref149c22015-04-09 18:53:45 +0300569{
570 int ret;
571
Jiri Olsae035f4c2018-09-13 14:54:05 +0200572 ret = auxtrace_mmap__read(map, rec->itr, &rec->tool,
Adrian Hunteref149c22015-04-09 18:53:45 +0300573 record__process_auxtrace);
574 if (ret < 0)
575 return ret;
576
577 if (ret)
578 rec->samples++;
579
580 return 0;
581}
582
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300583static int record__auxtrace_mmap_read_snapshot(struct record *rec,
Jiri Olsae035f4c2018-09-13 14:54:05 +0200584 struct perf_mmap *map)
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300585{
586 int ret;
587
Jiri Olsae035f4c2018-09-13 14:54:05 +0200588 ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool,
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300589 record__process_auxtrace,
590 rec->opts.auxtrace_snapshot_size);
591 if (ret < 0)
592 return ret;
593
594 if (ret)
595 rec->samples++;
596
597 return 0;
598}
599
600static int record__auxtrace_read_snapshot_all(struct record *rec)
601{
602 int i;
603 int rc = 0;
604
605 for (i = 0; i < rec->evlist->nr_mmaps; i++) {
Jiri Olsae035f4c2018-09-13 14:54:05 +0200606 struct perf_mmap *map = &rec->evlist->mmap[i];
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300607
Jiri Olsae035f4c2018-09-13 14:54:05 +0200608 if (!map->auxtrace_mmap.base)
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300609 continue;
610
Jiri Olsae035f4c2018-09-13 14:54:05 +0200611 if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300612 rc = -1;
613 goto out;
614 }
615 }
616out:
617 return rc;
618}
619
Alexander Shishkince7b0e42019-08-06 17:41:01 +0300620static void record__read_auxtrace_snapshot(struct record *rec, bool on_exit)
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300621{
622 pr_debug("Recording AUX area tracing snapshot\n");
623 if (record__auxtrace_read_snapshot_all(rec) < 0) {
Wang Nan5f9cf592016-04-20 18:59:49 +0000624 trigger_error(&auxtrace_snapshot_trigger);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300625 } else {
Alexander Shishkince7b0e42019-08-06 17:41:01 +0300626 if (auxtrace_record__snapshot_finish(rec->itr, on_exit))
Wang Nan5f9cf592016-04-20 18:59:49 +0000627 trigger_error(&auxtrace_snapshot_trigger);
628 else
629 trigger_ready(&auxtrace_snapshot_trigger);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300630 }
631}
632
Alexander Shishkince7b0e42019-08-06 17:41:01 +0300633static int record__auxtrace_snapshot_exit(struct record *rec)
634{
635 if (trigger_is_error(&auxtrace_snapshot_trigger))
636 return 0;
637
638 if (!auxtrace_record__snapshot_started &&
639 auxtrace_record__snapshot_start(rec->itr))
640 return -1;
641
642 record__read_auxtrace_snapshot(rec, true);
643 if (trigger_is_error(&auxtrace_snapshot_trigger))
644 return -1;
645
646 return 0;
647}
648
Adrian Hunter4b5ea3b2018-03-06 11:13:12 +0200649static int record__auxtrace_init(struct record *rec)
650{
651 int err;
652
653 if (!rec->itr) {
654 rec->itr = auxtrace_record__init(rec->evlist, &err);
655 if (err)
656 return err;
657 }
658
659 err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
660 rec->opts.auxtrace_snapshot_opts);
661 if (err)
662 return err;
663
664 return auxtrace_parse_filters(rec->evlist);
665}
666
#else

/*
 * Stubs used when perf is built without AUX area tracing support: all
 * operations succeed trivially and read nothing.
 */

static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
			       struct perf_mmap *map __maybe_unused)
{
	return 0;
}

static inline
void record__read_auxtrace_snapshot(struct record *rec __maybe_unused,
				    bool on_exit __maybe_unused)
{
}

static inline
int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
{
	return 0;
}

static inline
int record__auxtrace_snapshot_exit(struct record *rec __maybe_unused)
{
	return 0;
}

static int record__auxtrace_init(struct record *rec __maybe_unused)
{
	return 0;
}

#endif
700
/*
 * mmap the ring buffers for @evlist according to the record options
 * (buffer sizes, AUX area, aio blocks, affinity, flush threshold and
 * compression level).  Prints a diagnostic and returns a negative errno
 * (or -EINVAL) on failure, 0 on success.
 */
static int record__mmap_evlist(struct record *rec,
			       struct evlist *evlist)
{
	struct record_opts *opts = &rec->opts;
	char msg[512];

	/* Node-affinity modes need the cpu -> node map set up first. */
	if (opts->affinity != PERF_AFFINITY_SYS)
		cpu__setup_cpunode_map();

	if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
				 opts->auxtrace_mmap_pages,
				 opts->auxtrace_snapshot_mode,
				 opts->nr_cblocks, opts->affinity,
				 opts->mmap_flush, opts->comp_level) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u,%u)\n",
			       opts->mmap_pages, opts->auxtrace_mmap_pages);
			return -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno,
				str_error_r(errno, msg, sizeof(msg)));
			if (errno)
				return -errno;
			else
				return -EINVAL;
		}
	}
	return 0;
}
734
735static int record__mmap(struct record *rec)
736{
737 return record__mmap_evlist(rec, rec->evlist);
738}
739
/*
 * Open all the events for recording, applying fallbacks when the exact
 * configuration is not supported, then apply filters and mmap the ring
 * buffers.  Returns 0 on success or a negative error after printing a
 * diagnostic.
 */
static int record__open(struct record *rec)
{
	char msg[BUFSIZ];
	struct evsel *pos;
	struct evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	int rc = 0;

	/*
	 * For initial_delay we need to add a dummy event so that we can track
	 * PERF_RECORD_MMAP while we wait for the initial delay to enable the
	 * real events, the ones asked by the user.
	 */
	if (opts->initial_delay) {
		if (perf_evlist__add_dummy(evlist))
			return -ENOMEM;

		/* The dummy (first) event tracks; the real ones start on exec. */
		pos = perf_evlist__first(evlist);
		pos->tracking = 0;
		pos = perf_evlist__last(evlist);
		pos->tracking = 1;
		pos->core.attr.enable_on_exec = 1;
	}

	perf_evlist__config(evlist, opts, &callchain_param);

	evlist__for_each_entry(evlist, pos) {
try_again:
		if (evsel__open(pos, pos->core.cpus, pos->core.threads) < 0) {
			/* First fallback: let the evsel degrade itself (e.g. drop flags). */
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose > 0)
					ui__warning("%s\n", msg);
				goto try_again;
			}
			/* Second fallback: retry a weak group as separate events. */
			if ((errno == EINVAL || errno == EBADF) &&
			    pos->leader != pos &&
			    pos->weak_group) {
				pos = perf_evlist__reset_weak_group(evlist, pos);
				goto try_again;
			}
			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}

		pos->supported = true;
	}

	if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(evlist)) {
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");
	}

	if (perf_evlist__apply_filters(evlist, &pos)) {
		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
			pos->filter, perf_evsel__name(pos), errno,
			str_error_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	rc = record__mmap(rec);
	if (rc)
		goto out;

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}
819
Namhyung Kime3d59112015-01-29 17:06:44 +0900820static int process_sample_event(struct perf_tool *tool,
821 union perf_event *event,
822 struct perf_sample *sample,
Jiri Olsa32dcd022019-07-21 13:23:51 +0200823 struct evsel *evsel,
Namhyung Kime3d59112015-01-29 17:06:44 +0900824 struct machine *machine)
825{
826 struct record *rec = container_of(tool, struct record, tool);
827
Jin Yao68588ba2017-12-08 21:13:42 +0800828 if (rec->evlist->first_sample_time == 0)
829 rec->evlist->first_sample_time = sample->time;
Namhyung Kime3d59112015-01-29 17:06:44 +0900830
Jin Yao68588ba2017-12-08 21:13:42 +0800831 rec->evlist->last_sample_time = sample->time;
832
833 if (rec->buildid_all)
834 return 0;
835
836 rec->samples++;
Namhyung Kime3d59112015-01-29 17:06:44 +0900837 return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
838}
839
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -0300840static int process_buildids(struct record *rec)
Arnaldo Carvalho de Melo6122e4e2010-02-03 16:52:05 -0200841{
Jiri Olsaf5fc14122013-10-15 16:27:32 +0200842 struct perf_session *session = rec->session;
Arnaldo Carvalho de Melo6122e4e2010-02-03 16:52:05 -0200843
Jiri Olsa45112e82019-02-21 10:41:29 +0100844 if (perf_data__size(&rec->data) == 0)
Arnaldo Carvalho de Melo9f591fd2010-03-11 15:53:11 -0300845 return 0;
846
Namhyung Kim00dc8652014-11-04 10:14:32 +0900847 /*
848 * During this process, it'll load kernel map and replace the
849 * dso->long_name to a real pathname it found. In this case
850 * we prefer the vmlinux path like
851 * /lib/modules/3.16.4/build/vmlinux
852 *
853 * rather than build-id path (in debug directory).
854 * $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
855 */
856 symbol_conf.ignore_vmlinux_buildid = true;
857
Namhyung Kim61566812016-01-11 22:37:09 +0900858 /*
859 * If --buildid-all is given, it marks all DSO regardless of hits,
Jin Yao68588ba2017-12-08 21:13:42 +0800860 * so no need to process samples. But if timestamp_boundary is enabled,
861 * it still needs to walk on all samples to get the timestamps of
862 * first/last samples.
Namhyung Kim61566812016-01-11 22:37:09 +0900863 */
Jin Yao68588ba2017-12-08 21:13:42 +0800864 if (rec->buildid_all && !rec->timestamp_boundary)
Namhyung Kim61566812016-01-11 22:37:09 +0900865 rec->tool.sample = NULL;
866
Arnaldo Carvalho de Melob7b61cb2015-03-03 11:58:45 -0300867 return perf_session__process_events(session);
Arnaldo Carvalho de Melo6122e4e2010-02-03 16:52:05 -0200868}
869
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -0200870static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800871{
872 int err;
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -0200873 struct perf_tool *tool = data;
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800874 /*
875 *As for guest kernel when processing subcommand record&report,
876 *we arrange module mmap prior to guest kernel mmap and trigger
877 *a preload dso because default guest module symbols are loaded
878 *from guest kallsyms instead of /lib/modules/XXX/XXX. This
879 *method is used to avoid symbol missing when the first addr is
880 *in module instead of in guest kernel.
881 */
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -0200882 err = perf_event__synthesize_modules(tool, process_synthesized_event,
Arnaldo Carvalho de Melo743eb862011-11-28 07:56:39 -0200883 machine);
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800884 if (err < 0)
885 pr_err("Couldn't record guest kernel [%d]'s reference"
Arnaldo Carvalho de Melo23346f22010-04-27 21:17:50 -0300886 " relocation symbol.\n", machine->pid);
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800887
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800888 /*
889 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
890 * have no _text sometimes.
891 */
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -0200892 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
Adrian Hunter0ae617b2014-01-29 16:14:40 +0200893 machine);
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800894 if (err < 0)
895 pr_err("Couldn't record guest kernel [%d]'s reference"
Arnaldo Carvalho de Melo23346f22010-04-27 21:17:50 -0300896 " relocation symbol.\n", machine->pid);
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800897}
898
/*
 * Header-only synthetic event written after each drain of the mmap
 * buffers; tells the report side that all events recorded so far can be
 * safely time-ordered and flushed.
 */
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};
903
Alexey Budankovf13de662019-01-22 20:50:57 +0300904static void record__adjust_affinity(struct record *rec, struct perf_mmap *map)
905{
906 if (rec->opts.affinity != PERF_AFFINITY_SYS &&
907 !CPU_EQUAL(&rec->affinity_mask, &map->affinity_mask)) {
908 CPU_ZERO(&rec->affinity_mask);
909 CPU_OR(&rec->affinity_mask, &rec->affinity_mask, &map->affinity_mask);
910 sched_setaffinity(0, sizeof(rec->affinity_mask), &rec->affinity_mask);
911 }
912}
913
Alexey Budankov5d7f4112019-03-18 20:43:35 +0300914static size_t process_comp_header(void *record, size_t increment)
915{
Jiri Olsa72932372019-08-28 15:57:16 +0200916 struct perf_record_compressed *event = record;
Alexey Budankov5d7f4112019-03-18 20:43:35 +0300917 size_t size = sizeof(*event);
918
919 if (increment) {
920 event->header.size += increment;
921 return increment;
922 }
923
924 event->header.type = PERF_RECORD_COMPRESSED;
925 event->header.size = size;
926
927 return size;
928}
929
930static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
931 void *src, size_t src_size)
932{
933 size_t compressed;
Jiri Olsa72932372019-08-28 15:57:16 +0200934 size_t max_record_size = PERF_SAMPLE_MAX_SIZE - sizeof(struct perf_record_compressed) - 1;
Alexey Budankov5d7f4112019-03-18 20:43:35 +0300935
936 compressed = zstd_compress_stream_to_records(&session->zstd_data, dst, dst_size, src, src_size,
937 max_record_size, process_comp_header);
938
939 session->bytes_transferred += src_size;
940 session->bytes_compressed += compressed;
941
942 return compressed;
943}
944
/*
 * Drain one set of mmap ring buffers (regular or overwritable) from
 * @evlist into the output file, via plain writes or AIO depending on
 * configuration.  When @synch is set, each map's flush threshold is
 * temporarily forced to 1 so every pending byte is pushed out (used for
 * final/synchronous flushes).  Returns 0 on success, -1 on error.
 */
static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist,
				    bool overwrite, bool synch)
{
	u64 bytes_written = rec->bytes_written;
	int i;
	int rc = 0;
	struct perf_mmap *maps;
	int trace_fd = rec->data.file.fd;
	off_t off = 0;

	if (!evlist)
		return 0;

	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
	if (!maps)
		return 0;

	/* Overwritable buffers are only read once data collection is paused. */
	if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
		return 0;

	/* With AIO, track the file offset manually across async pushes. */
	if (record__aio_enabled(rec))
		off = record__aio_get_pos(trace_fd);

	for (i = 0; i < evlist->nr_mmaps; i++) {
		u64 flush = 0;
		struct perf_mmap *map = &maps[i];

		if (map->base) {
			record__adjust_affinity(rec, map);
			if (synch) {
				/* Force a full flush; restore the old threshold after. */
				flush = map->flush;
				map->flush = 1;
			}
			if (!record__aio_enabled(rec)) {
				if (perf_mmap__push(map, rec, record__pushfn) < 0) {
					if (synch)
						map->flush = flush;
					rc = -1;
					goto out;
				}
			} else {
				if (record__aio_push(rec, map, &off) < 0) {
					/* Resync kernel file position before bailing out. */
					record__aio_set_pos(trace_fd, off);
					if (synch)
						map->flush = flush;
					rc = -1;
					goto out;
				}
			}
			if (synch)
				map->flush = flush;
		}

		/* AUX area data is read separately unless in snapshot mode. */
		if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
		    record__auxtrace_mmap_read(rec, map) != 0) {
			rc = -1;
			goto out;
		}
	}

	if (record__aio_enabled(rec))
		record__aio_set_pos(trace_fd, off);

	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, NULL, &finished_round_event, sizeof(finished_round_event));

	/* Let the kernel resume overwriting the just-drained buffers. */
	if (overwrite)
		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
out:
	return rc;
}
1020
Alexey Budankov470530b2019-03-18 20:40:26 +03001021static int record__mmap_read_all(struct record *rec, bool synch)
Wang Nancb216862016-06-27 10:24:04 +00001022{
1023 int err;
1024
Alexey Budankov470530b2019-03-18 20:40:26 +03001025 err = record__mmap_read_evlist(rec, rec->evlist, false, synch);
Wang Nancb216862016-06-27 10:24:04 +00001026 if (err)
1027 return err;
1028
Alexey Budankov470530b2019-03-18 20:40:26 +03001029 return record__mmap_read_evlist(rec, rec->evlist, true, synch);
Wang Nancb216862016-06-27 10:24:04 +00001030}
1031
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001032static void record__init_features(struct record *rec)
David Ahern57706ab2013-11-06 11:41:34 -07001033{
David Ahern57706ab2013-11-06 11:41:34 -07001034 struct perf_session *session = rec->session;
1035 int feat;
1036
1037 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
1038 perf_header__set_feat(&session->header, feat);
1039
1040 if (rec->no_buildid)
1041 perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
1042
Jiri Olsace9036a2019-07-21 13:24:23 +02001043 if (!have_tracepoints(&rec->evlist->core.entries))
David Ahern57706ab2013-11-06 11:41:34 -07001044 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
1045
1046 if (!rec->opts.branch_stack)
1047 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
Adrian Hunteref149c22015-04-09 18:53:45 +03001048
1049 if (!rec->opts.full_auxtrace)
1050 perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
Jiri Olsaffa517a2015-10-25 15:51:43 +01001051
Alexey Budankovcf790512018-10-09 17:36:24 +03001052 if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
1053 perf_header__clear_feat(&session->header, HEADER_CLOCKID);
1054
Jiri Olsa258031c2019-03-08 14:47:39 +01001055 perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
Alexey Budankov42e1fd82019-03-18 20:41:33 +03001056 if (!record__comp_enabled(rec))
1057 perf_header__clear_feat(&session->header, HEADER_COMPRESSED);
Jiri Olsa258031c2019-03-08 14:47:39 +01001058
Jiri Olsaffa517a2015-10-25 15:51:43 +01001059 perf_header__clear_feat(&session->header, HEADER_STAT);
David Ahern57706ab2013-11-06 11:41:34 -07001060}
1061
Wang Nane1ab48b2016-02-26 09:32:10 +00001062static void
1063record__finish_output(struct record *rec)
1064{
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001065 struct perf_data *data = &rec->data;
1066 int fd = perf_data__fd(data);
Wang Nane1ab48b2016-02-26 09:32:10 +00001067
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001068 if (data->is_pipe)
Wang Nane1ab48b2016-02-26 09:32:10 +00001069 return;
1070
1071 rec->session->header.data_size += rec->bytes_written;
Jiri Olsa45112e82019-02-21 10:41:29 +01001072 data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR);
Wang Nane1ab48b2016-02-26 09:32:10 +00001073
1074 if (!rec->no_buildid) {
1075 process_buildids(rec);
1076
1077 if (rec->buildid_all)
1078 dsos__hit_all(rec->session);
1079 }
1080 perf_session__write_header(rec->session, rec->evlist, fd, true);
1081
1082 return;
1083}
1084
Wang Nan4ea648a2016-07-14 08:34:47 +00001085static int record__synthesize_workload(struct record *rec, bool tail)
Wang Nanbe7b0c92016-04-20 18:59:54 +00001086{
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -03001087 int err;
Jiri Olsa9749b902019-07-21 13:23:50 +02001088 struct perf_thread_map *thread_map;
Wang Nanbe7b0c92016-04-20 18:59:54 +00001089
Wang Nan4ea648a2016-07-14 08:34:47 +00001090 if (rec->opts.tail_synthesize != tail)
1091 return 0;
1092
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -03001093 thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
1094 if (thread_map == NULL)
1095 return -1;
1096
1097 err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
Wang Nanbe7b0c92016-04-20 18:59:54 +00001098 process_synthesized_event,
1099 &rec->session->machines.host,
Mark Drayton3fcb10e2018-12-04 12:34:20 -08001100 rec->opts.sample_address);
Jiri Olsa7836e522019-07-21 13:24:20 +02001101 perf_thread_map__put(thread_map);
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -03001102 return err;
Wang Nanbe7b0c92016-04-20 18:59:54 +00001103}
1104
Wang Nan4ea648a2016-07-14 08:34:47 +00001105static int record__synthesize(struct record *rec, bool tail);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001106
/*
 * Rotate the output file for --switch-output: flush and finalize the
 * current perf.data, rename it with a timestamp suffix, and (unless we
 * are exiting) start a fresh file with freshly synthesized tracking
 * events.  Returns the new output fd, or negative on error.
 */
static int
record__switch_output(struct record *rec, bool at_exit)
{
	struct perf_data *data = &rec->data;
	int fd, err;
	char *new_filename;

	/* Same Size: "2015122520103046"*/
	char timestamp[] = "InvalidTimestamp";

	/* Wait for in-flight AIO writes before touching the file. */
	record__aio_mmap_read_sync(rec);

	record__synthesize(rec, true);
	if (target__none(&rec->opts.target))
		record__synthesize_workload(rec, true);

	rec->samples = 0;
	record__finish_output(rec);
	err = fetch_current_timestamp(timestamp, sizeof(timestamp));
	if (err) {
		pr_err("Failed to get current timestamp\n");
		return -EINVAL;
	}

	/* Rename the finished file and open the next one. */
	fd = perf_data__switch(data, timestamp,
			       rec->session->header.data_offset,
			       at_exit, &new_filename);
	if (fd >= 0 && !at_exit) {
		/* Fresh file: restart the byte accounting. */
		rec->bytes_written = 0;
		rec->session->header.data_size = 0;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
			data->path, timestamp);

	if (rec->switch_output.num_files) {
		/* Ring of N retained files: evict the oldest slot. */
		int n = rec->switch_output.cur_file + 1;

		if (n >= rec->switch_output.num_files)
			n = 0;
		rec->switch_output.cur_file = n;
		if (rec->switch_output.filenames[n]) {
			remove(rec->switch_output.filenames[n]);
			zfree(&rec->switch_output.filenames[n]);
		}
		rec->switch_output.filenames[n] = new_filename;
	} else {
		free(new_filename);
	}

	/* Output tracking events */
	if (!at_exit) {
		record__synthesize(rec, false);

		/*
		 * In 'perf record --switch-output' without -a,
		 * record__synthesize() in record__switch_output() won't
		 * generate tracking events because there's no thread_map
		 * in evlist. Which causes newly created perf.data doesn't
		 * contain map and comm information.
		 * Create a fake thread_map and directly call
		 * perf_event__synthesize_thread_map() for those events.
		 */
		if (target__none(&rec->opts.target))
			record__synthesize_workload(rec, false);
	}
	return fd;
}
1176
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001177static volatile int workload_exec_errno;
1178
1179/*
1180 * perf_evlist__prepare_workload will send a SIGUSR1
1181 * if the fork fails, since we asked by setting its
1182 * want_signal to true.
1183 */
/*
 * SIGUSR1 handler: the forked workload signals us here when its exec
 * fails, passing its errno via sigqueue().  Only async-signal-safe
 * stores to flags are performed; the main loop notices 'done' and
 * reports 'workload_exec_errno'.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	/* errno of the failed exec, sent in the sigqueue() value payload. */
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}
1192
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001193static void snapshot_sig_handler(int sig);
Jiri Olsabfacbe32017-01-09 10:52:00 +01001194static void alarm_sig_handler(int sig);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001195
Wang Nanee667f92016-06-27 10:24:05 +00001196static const struct perf_event_mmap_page *
Jiri Olsa63503db2019-07-21 13:23:52 +02001197perf_evlist__pick_pc(struct evlist *evlist)
Wang Nanee667f92016-06-27 10:24:05 +00001198{
Wang Nanb2cb6152016-07-14 08:34:39 +00001199 if (evlist) {
1200 if (evlist->mmap && evlist->mmap[0].base)
1201 return evlist->mmap[0].base;
Wang Nan0b72d692017-12-04 16:51:07 +00001202 if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].base)
1203 return evlist->overwrite_mmap[0].base;
Wang Nanb2cb6152016-07-14 08:34:39 +00001204 }
Wang Nanee667f92016-06-27 10:24:05 +00001205 return NULL;
1206}
1207
Wang Nanc45628b2016-05-24 02:28:59 +00001208static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
1209{
Wang Nanee667f92016-06-27 10:24:05 +00001210 const struct perf_event_mmap_page *pc;
1211
1212 pc = perf_evlist__pick_pc(rec->evlist);
1213 if (pc)
1214 return pc;
Wang Nanc45628b2016-05-24 02:28:59 +00001215 return NULL;
1216}
1217
/*
 * Emit all the synthetic (non-hardware) events that describe the system
 * state to the output: attrs/features/tracing data for pipe output,
 * time conversion info, auxtrace info, kernel and module mmaps, guest
 * machines, extra attrs, thread/cpu maps, BPF events and existing
 * threads.  Runs either at session start or tail depending on
 * --tail-synthesize.  Returns 0 on success, negative on error.
 *
 * NOTE(review): some error paths 'return err' while others 'goto out';
 * these are equivalent here since the out label only returns err, but
 * the style is inconsistent.
 */
static int record__synthesize(struct record *rec, bool tail)
{
	struct perf_session *session = rec->session;
	struct machine *machine = &session->machines.host;
	struct perf_data *data = &rec->data;
	struct record_opts *opts = &rec->opts;
	struct perf_tool *tool = &rec->tool;
	int fd = perf_data__fd(data);
	int err = 0;

	if (rec->opts.tail_synthesize != tail)
		return 0;

	if (data->is_pipe) {
		/*
		 * We need to synthesize events first, because some
		 * features works on top of them (on report side).
		 */
		err = perf_event__synthesize_attrs(tool, rec->evlist,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out;
		}

		err = perf_event__synthesize_features(tool, session, rec->evlist,
						      process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize features.\n");
			return err;
		}

		if (have_tracepoints(&rec->evlist->core.entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so its not really
			 * an error, just that we don't need to
			 * synthesize anything. We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out;
			}
			/* Tracing data goes straight into the file - account it. */
			rec->bytes_written += err;
		}
	}

	err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
					  process_synthesized_event, machine);
	if (err)
		goto out;

	if (rec->opts.full_auxtrace) {
		err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
					session, process_synthesized_event);
		if (err)
			goto out;
	}

	/* Kernel-side maps are skipped when all events exclude the kernel. */
	if (!perf_evlist__exclude_kernel(rec->evlist)) {
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine);
		WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
				   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
				   "Check /proc/kallsyms permission or run as root.\n");

		err = perf_event__synthesize_modules(tool, process_synthesized_event,
						     machine);
		WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
				   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
				   "Check /proc/modules permission or run as root.\n");
	}

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	err = perf_event__synthesize_extra_attr(&rec->tool,
						rec->evlist,
						process_synthesized_event,
						data->is_pipe);
	if (err)
		goto out;

	err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->core.threads,
						 process_synthesized_event,
						NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize thread map.\n");
		return err;
	}

	err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.cpus,
					     process_synthesized_event, NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize cpu map.\n");
		return err;
	}

	err = perf_event__synthesize_bpf_events(session, process_synthesized_event,
						machine, opts);
	if (err < 0)
		pr_warning("Couldn't synthesize bpf events.\n");

	/* Finally, describe the threads that already exist on the target. */
	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->core.threads,
					    process_synthesized_event, opts->sample_address,
					    1);
out:
	return err;
}
1333
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001334static int __cmd_record(struct record *rec, int argc, const char **argv)
Peter Zijlstra16c8a102009-05-05 17:50:27 +02001335{
David Ahern57706ab2013-11-06 11:41:34 -07001336 int err;
Namhyung Kim45604712014-05-12 09:47:24 +09001337 int status = 0;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001338 unsigned long waking = 0;
Zhang, Yanmin46be6042010-03-18 11:36:04 -03001339 const bool forks = argc > 0;
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02001340 struct perf_tool *tool = &rec->tool;
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03001341 struct record_opts *opts = &rec->opts;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001342 struct perf_data *data = &rec->data;
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001343 struct perf_session *session;
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001344 bool disabled = false, draining = false;
Jiri Olsa63503db2019-07-21 13:23:52 +02001345 struct evlist *sb_evlist = NULL;
Namhyung Kim42aa2762015-01-29 17:06:48 +09001346 int fd;
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001347 float ratio = 0;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001348
Namhyung Kim45604712014-05-12 09:47:24 +09001349 atexit(record__sig_exit);
Peter Zijlstraf5970552009-06-18 23:22:55 +02001350 signal(SIGCHLD, sig_handler);
1351 signal(SIGINT, sig_handler);
David Ahern804f7ac2013-05-06 12:24:23 -06001352 signal(SIGTERM, sig_handler);
Wang Nana0748652016-11-26 07:03:28 +00001353 signal(SIGSEGV, sigsegv_handler);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00001354
Hari Bathinif3b36142017-03-08 02:11:43 +05301355 if (rec->opts.record_namespaces)
1356 tool->namespace_events = true;
1357
Jiri Olsadc0c6122017-01-09 10:51:58 +01001358 if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001359 signal(SIGUSR2, snapshot_sig_handler);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001360 if (rec->opts.auxtrace_snapshot_mode)
1361 trigger_on(&auxtrace_snapshot_trigger);
Jiri Olsadc0c6122017-01-09 10:51:58 +01001362 if (rec->switch_output.enabled)
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001363 trigger_on(&switch_output_trigger);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00001364 } else {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001365 signal(SIGUSR2, SIG_IGN);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00001366 }
Peter Zijlstraf5970552009-06-18 23:22:55 +02001367
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001368 session = perf_session__new(data, false, tool);
Mamatha Inamdar6ef81c52019-08-22 12:50:49 +05301369 if (IS_ERR(session)) {
Adrien BAKffa91882014-04-18 11:00:43 +09001370 pr_err("Perf session creation failed.\n");
Mamatha Inamdar6ef81c52019-08-22 12:50:49 +05301371 return PTR_ERR(session);
Arnaldo Carvalho de Meloa9a70bb2009-11-17 01:18:11 -02001372 }
1373
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001374 fd = perf_data__fd(data);
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001375 rec->session = session;
1376
Alexey Budankov5d7f4112019-03-18 20:43:35 +03001377 if (zstd_init(&session->zstd_data, rec->opts.comp_level) < 0) {
1378 pr_err("Compression initialization failed.\n");
1379 return -1;
1380 }
1381
1382 session->header.env.comp_type = PERF_COMP_ZSTD;
1383 session->header.env.comp_level = rec->opts.comp_level;
1384
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001385 record__init_features(rec);
Stephane Eranian330aa672012-03-08 23:47:46 +01001386
Alexey Budankovcf790512018-10-09 17:36:24 +03001387 if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
1388 session->header.env.clockid_res_ns = rec->opts.clockid_res_ns;
1389
Arnaldo Carvalho de Melod4db3f12009-12-27 21:36:57 -02001390 if (forks) {
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001391 err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001392 argv, data->is_pipe,
Arnaldo Carvalho de Melo735f7e02014-01-03 14:56:49 -03001393 workload_exec_failed_signal);
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001394 if (err < 0) {
1395 pr_err("Couldn't run the workload!\n");
Namhyung Kim45604712014-05-12 09:47:24 +09001396 status = err;
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001397 goto out_delete_session;
Jens Axboe0a5ac842009-08-12 11:18:01 +02001398 }
Peter Zijlstra856e9662009-12-16 17:55:55 +01001399 }
1400
Jiri Olsaad46e48c2018-03-02 17:13:54 +01001401 /*
1402 * If we have just single event and are sending data
1403 * through pipe, we need to force the ids allocation,
1404 * because we synthesize event name through the pipe
1405 * and need the id for that.
1406 */
Jiri Olsa6484d2f2019-07-21 13:24:28 +02001407 if (data->is_pipe && rec->evlist->core.nr_entries == 1)
Jiri Olsaad46e48c2018-03-02 17:13:54 +01001408 rec->opts.sample_id = true;
1409
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001410 if (record__open(rec) != 0) {
David Ahern8d3eca22012-08-26 12:24:47 -06001411 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001412 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001413 }
Alexey Budankov42e1fd82019-03-18 20:41:33 +03001414 session->header.env.comp_mmap_len = session->evlist->mmap_len;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001415
Wang Nan8690a2a2016-02-22 09:10:32 +00001416 err = bpf__apply_obj_config();
1417 if (err) {
1418 char errbuf[BUFSIZ];
1419
1420 bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
1421 pr_err("ERROR: Apply config to BPF failed: %s\n",
1422 errbuf);
1423 goto out_child;
1424 }
1425
Adrian Huntercca84822015-08-19 17:29:21 +03001426 /*
1427 * Normally perf_session__new would do this, but it doesn't have the
1428 * evlist.
1429 */
1430 if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
1431 pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
1432 rec->tool.ordered_events = false;
1433 }
1434
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001435 if (!rec->evlist->nr_groups)
Namhyung Kima8bb5592013-01-22 18:09:31 +09001436 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
1437
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001438 if (data->is_pipe) {
Namhyung Kim42aa2762015-01-29 17:06:48 +09001439 err = perf_header__write_pipe(fd);
Tom Zanussi529870e2010-04-01 23:59:16 -05001440 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001441 goto out_child;
Jiri Olsa563aecb2013-06-05 13:35:06 +02001442 } else {
Namhyung Kim42aa2762015-01-29 17:06:48 +09001443 err = perf_session__write_header(session, rec->evlist, fd, false);
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02001444 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001445 goto out_child;
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02001446 }
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02001447
David Ahernd3665492012-02-06 15:27:52 -07001448 if (!rec->no_buildid
Robert Richtere20960c2011-12-07 10:02:55 +01001449 && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
David Ahernd3665492012-02-06 15:27:52 -07001450 pr_err("Couldn't generate buildids. "
Robert Richtere20960c2011-12-07 10:02:55 +01001451 "Use --no-buildid to profile anyway.\n");
David Ahern8d3eca22012-08-26 12:24:47 -06001452 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001453 goto out_child;
Robert Richtere20960c2011-12-07 10:02:55 +01001454 }
1455
Song Liud56354d2019-03-11 22:30:51 -07001456 if (!opts->no_bpf_event)
1457 bpf_event__add_sb_event(&sb_evlist, &session->header.env);
1458
Song Liu657ee552019-03-11 22:30:50 -07001459 if (perf_evlist__start_sb_thread(sb_evlist, &rec->opts.target)) {
1460 pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
1461 opts->no_bpf_event = true;
1462 }
1463
Wang Nan4ea648a2016-07-14 08:34:47 +00001464 err = record__synthesize(rec, false);
Wang Nanc45c86e2016-02-26 09:32:07 +00001465 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001466 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001467
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001468 if (rec->realtime_prio) {
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001469 struct sched_param param;
1470
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001471 param.sched_priority = rec->realtime_prio;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001472 if (sched_setscheduler(0, SCHED_FIFO, &param)) {
Arnaldo Carvalho de Melo6beba7a2009-10-21 17:34:06 -02001473 pr_err("Could not set realtime priority.\n");
David Ahern8d3eca22012-08-26 12:24:47 -06001474 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001475 goto out_child;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001476 }
1477 }
1478
Jiri Olsa774cb492012-11-12 18:34:01 +01001479 /*
1480 * When perf is starting the traced process, all the events
1481 * (apart from group members) have enable_on_exec=1 set,
1482 * so don't spoil it by prematurely enabling them.
1483 */
Andi Kleen6619a532014-01-11 13:38:27 -08001484 if (!target__none(&opts->target) && !opts->initial_delay)
Jiri Olsa1c87f162019-07-21 13:24:08 +02001485 evlist__enable(rec->evlist);
David Ahern764e16a32011-08-25 10:17:55 -06001486
Peter Zijlstra856e9662009-12-16 17:55:55 +01001487 /*
1488 * Let the child rip
1489 */
Namhyung Kime803cf92015-09-22 09:24:55 +09001490 if (forks) {
Jiri Olsa20a8a3c2018-03-07 16:50:04 +01001491 struct machine *machine = &session->machines.host;
Namhyung Kime5bed562015-09-30 10:45:24 +09001492 union perf_event *event;
Hari Bathinie907caf2017-03-08 02:11:51 +05301493 pid_t tgid;
Namhyung Kime5bed562015-09-30 10:45:24 +09001494
1495 event = malloc(sizeof(event->comm) + machine->id_hdr_size);
1496 if (event == NULL) {
1497 err = -ENOMEM;
1498 goto out_child;
1499 }
1500
Namhyung Kime803cf92015-09-22 09:24:55 +09001501 /*
1502 * Some H/W events are generated before COMM event
1503 * which is emitted during exec(), so perf script
1504 * cannot see a correct process name for those events.
1505 * Synthesize COMM event to prevent it.
1506 */
Hari Bathinie907caf2017-03-08 02:11:51 +05301507 tgid = perf_event__synthesize_comm(tool, event,
1508 rec->evlist->workload.pid,
1509 process_synthesized_event,
1510 machine);
1511 free(event);
1512
1513 if (tgid == -1)
1514 goto out_child;
1515
1516 event = malloc(sizeof(event->namespaces) +
1517 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
1518 machine->id_hdr_size);
1519 if (event == NULL) {
1520 err = -ENOMEM;
1521 goto out_child;
1522 }
1523
1524 /*
1525 * Synthesize NAMESPACES event for the command specified.
1526 */
1527 perf_event__synthesize_namespaces(tool, event,
1528 rec->evlist->workload.pid,
1529 tgid, process_synthesized_event,
1530 machine);
Namhyung Kime5bed562015-09-30 10:45:24 +09001531 free(event);
Namhyung Kime803cf92015-09-22 09:24:55 +09001532
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001533 perf_evlist__start_workload(rec->evlist);
Namhyung Kime803cf92015-09-22 09:24:55 +09001534 }
Peter Zijlstra856e9662009-12-16 17:55:55 +01001535
Andi Kleen6619a532014-01-11 13:38:27 -08001536 if (opts->initial_delay) {
Arnaldo Carvalho de Melo0693e682016-08-08 15:05:46 -03001537 usleep(opts->initial_delay * USEC_PER_MSEC);
Jiri Olsa1c87f162019-07-21 13:24:08 +02001538 evlist__enable(rec->evlist);
Andi Kleen6619a532014-01-11 13:38:27 -08001539 }
1540
Wang Nan5f9cf592016-04-20 18:59:49 +00001541 trigger_ready(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001542 trigger_ready(&switch_output_trigger);
Wang Nana0748652016-11-26 07:03:28 +00001543 perf_hooks__invoke_record_start();
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001544 for (;;) {
Yang Shi9f065192015-09-29 14:49:43 -07001545 unsigned long long hits = rec->samples;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001546
Wang Nan057374642016-07-14 08:34:43 +00001547 /*
1548 * rec->evlist->bkw_mmap_state is possible to be
1549 * BKW_MMAP_EMPTY here: when done == true and
1550 * hits != rec->samples in previous round.
1551 *
1552 * perf_evlist__toggle_bkw_mmap ensure we never
1553 * convert BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
1554 */
1555 if (trigger_is_hit(&switch_output_trigger) || done || draining)
1556 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);
1557
Alexey Budankov470530b2019-03-18 20:40:26 +03001558 if (record__mmap_read_all(rec, false) < 0) {
Wang Nan5f9cf592016-04-20 18:59:49 +00001559 trigger_error(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001560 trigger_error(&switch_output_trigger);
David Ahern8d3eca22012-08-26 12:24:47 -06001561 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001562 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001563 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001564
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001565 if (auxtrace_record__snapshot_started) {
1566 auxtrace_record__snapshot_started = 0;
Wang Nan5f9cf592016-04-20 18:59:49 +00001567 if (!trigger_is_error(&auxtrace_snapshot_trigger))
Alexander Shishkince7b0e42019-08-06 17:41:01 +03001568 record__read_auxtrace_snapshot(rec, false);
Wang Nan5f9cf592016-04-20 18:59:49 +00001569 if (trigger_is_error(&auxtrace_snapshot_trigger)) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001570 pr_err("AUX area tracing snapshot failed\n");
1571 err = -1;
1572 goto out_child;
1573 }
1574 }
1575
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001576 if (trigger_is_hit(&switch_output_trigger)) {
Wang Nan057374642016-07-14 08:34:43 +00001577 /*
1578 * If switch_output_trigger is hit, the data in
1579 * overwritable ring buffer should have been collected,
1580 * so bkw_mmap_state should be set to BKW_MMAP_EMPTY.
1581 *
1582 * If SIGUSR2 raise after or during record__mmap_read_all(),
1583 * record__mmap_read_all() didn't collect data from
1584 * overwritable ring buffer. Read again.
1585 */
1586 if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
1587 continue;
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001588 trigger_ready(&switch_output_trigger);
1589
Wang Nan057374642016-07-14 08:34:43 +00001590 /*
1591 * Reenable events in overwrite ring buffer after
1592 * record__mmap_read_all(): we should have collected
1593 * data from it.
1594 */
1595 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);
1596
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001597 if (!quiet)
1598 fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
1599 waking);
1600 waking = 0;
1601 fd = record__switch_output(rec, false);
1602 if (fd < 0) {
1603 pr_err("Failed to switch to new file\n");
1604 trigger_error(&switch_output_trigger);
1605 err = fd;
1606 goto out_child;
1607 }
Jiri Olsabfacbe32017-01-09 10:52:00 +01001608
1609 /* re-arm the alarm */
1610 if (rec->switch_output.time)
1611 alarm(rec->switch_output.time);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001612 }
1613
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001614 if (hits == rec->samples) {
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001615 if (done || draining)
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001616 break;
Arnaldo Carvalho de Melof66a8892014-08-18 17:25:59 -03001617 err = perf_evlist__poll(rec->evlist, -1);
Jiri Olsaa5151142014-06-02 13:44:23 -04001618 /*
1619 * Propagate error, only if there's any. Ignore positive
1620 * number of returned events and interrupt error.
1621 */
1622 if (err > 0 || (err < 0 && errno == EINTR))
Namhyung Kim45604712014-05-12 09:47:24 +09001623 err = 0;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001624 waking++;
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001625
1626 if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
1627 draining = true;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001628 }
1629
Jiri Olsa774cb492012-11-12 18:34:01 +01001630 /*
1631 * When perf is starting the traced process, at the end events
1632 * die with the process and we wait for that. Thus no need to
1633 * disable events in this case.
1634 */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001635 if (done && !disabled && !target__none(&opts->target)) {
Wang Nan5f9cf592016-04-20 18:59:49 +00001636 trigger_off(&auxtrace_snapshot_trigger);
Jiri Olsae74676d2019-07-21 13:24:09 +02001637 evlist__disable(rec->evlist);
Jiri Olsa27119262012-11-12 18:34:02 +01001638 disabled = true;
1639 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001640 }
Alexander Shishkince7b0e42019-08-06 17:41:01 +03001641
Wang Nan5f9cf592016-04-20 18:59:49 +00001642 trigger_off(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001643 trigger_off(&switch_output_trigger);
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001644
Alexander Shishkince7b0e42019-08-06 17:41:01 +03001645 if (opts->auxtrace_snapshot_on_exit)
1646 record__auxtrace_snapshot_exit(rec);
1647
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001648 if (forks && workload_exec_errno) {
Masami Hiramatsu35550da2014-08-14 02:22:43 +00001649 char msg[STRERR_BUFSIZE];
Arnaldo Carvalho de Meloc8b5f2c2016-07-06 11:56:20 -03001650 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001651 pr_err("Workload failed: %s\n", emsg);
1652 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001653 goto out_child;
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001654 }
1655
Namhyung Kime3d59112015-01-29 17:06:44 +09001656 if (!quiet)
Namhyung Kim45604712014-05-12 09:47:24 +09001657 fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02001658
Wang Nan4ea648a2016-07-14 08:34:47 +00001659 if (target__none(&rec->opts.target))
1660 record__synthesize_workload(rec, true);
1661
Namhyung Kim45604712014-05-12 09:47:24 +09001662out_child:
Alexey Budankov470530b2019-03-18 20:40:26 +03001663 record__mmap_read_all(rec, true);
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001664 record__aio_mmap_read_sync(rec);
1665
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001666 if (rec->session->bytes_transferred && rec->session->bytes_compressed) {
1667 ratio = (float)rec->session->bytes_transferred/(float)rec->session->bytes_compressed;
1668 session->header.env.comp_ratio = ratio + 0.5;
1669 }
1670
Namhyung Kim45604712014-05-12 09:47:24 +09001671 if (forks) {
1672 int exit_status;
Ingo Molnaraddc2782009-06-02 23:43:11 +02001673
Namhyung Kim45604712014-05-12 09:47:24 +09001674 if (!child_finished)
1675 kill(rec->evlist->workload.pid, SIGTERM);
1676
1677 wait(&exit_status);
1678
1679 if (err < 0)
1680 status = err;
1681 else if (WIFEXITED(exit_status))
1682 status = WEXITSTATUS(exit_status);
1683 else if (WIFSIGNALED(exit_status))
1684 signr = WTERMSIG(exit_status);
1685 } else
1686 status = err;
1687
Wang Nan4ea648a2016-07-14 08:34:47 +00001688 record__synthesize(rec, true);
Namhyung Kime3d59112015-01-29 17:06:44 +09001689 /* this will be recalculated during process_buildids() */
1690 rec->samples = 0;
1691
Wang Nanecfd7a92016-04-13 08:21:07 +00001692 if (!err) {
1693 if (!rec->timestamp_filename) {
1694 record__finish_output(rec);
1695 } else {
1696 fd = record__switch_output(rec, true);
1697 if (fd < 0) {
1698 status = fd;
1699 goto out_delete_session;
1700 }
1701 }
1702 }
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001703
Wang Nana0748652016-11-26 07:03:28 +00001704 perf_hooks__invoke_record_end();
1705
Namhyung Kime3d59112015-01-29 17:06:44 +09001706 if (!err && !quiet) {
1707 char samples[128];
Wang Nanecfd7a92016-04-13 08:21:07 +00001708 const char *postfix = rec->timestamp_filename ?
1709 ".<timestamp>" : "";
Namhyung Kime3d59112015-01-29 17:06:44 +09001710
Adrian Hunteref149c22015-04-09 18:53:45 +03001711 if (rec->samples && !rec->opts.full_auxtrace)
Namhyung Kime3d59112015-01-29 17:06:44 +09001712 scnprintf(samples, sizeof(samples),
1713 " (%" PRIu64 " samples)", rec->samples);
1714 else
1715 samples[0] = '\0';
1716
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001717 fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s",
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001718 perf_data__size(data) / 1024.0 / 1024.0,
Jiri Olsa2d4f2792019-02-21 10:41:30 +01001719 data->path, postfix, samples);
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001720 if (ratio) {
1721 fprintf(stderr, ", compressed (original %.3f MB, ratio is %.3f)",
1722 rec->session->bytes_transferred / 1024.0 / 1024.0,
1723 ratio);
1724 }
1725 fprintf(stderr, " ]\n");
Namhyung Kime3d59112015-01-29 17:06:44 +09001726 }
1727
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001728out_delete_session:
Alexey Budankov5d7f4112019-03-18 20:43:35 +03001729 zstd_fini(&session->zstd_data);
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001730 perf_session__delete(session);
Song Liu657ee552019-03-11 22:30:50 -07001731
1732 if (!opts->no_bpf_event)
1733 perf_evlist__stop_sb_thread(sb_evlist);
Namhyung Kim45604712014-05-12 09:47:24 +09001734 return status;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001735}
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001736
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001737static void callchain_debug(struct callchain_param *callchain)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001738{
Kan Liangaad2b212015-01-05 13:23:04 -05001739 static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
Jiri Olsaa601fdf2014-02-03 12:44:43 +01001740
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001741 pr_debug("callchain: type %s\n", str[callchain->record_mode]);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001742
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001743 if (callchain->record_mode == CALLCHAIN_DWARF)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001744 pr_debug("callchain: stack dump size %d\n",
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001745 callchain->dump_size);
1746}
1747
1748int record_opts__parse_callchain(struct record_opts *record,
1749 struct callchain_param *callchain,
1750 const char *arg, bool unset)
1751{
1752 int ret;
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001753 callchain->enabled = !unset;
1754
1755 /* --no-call-graph */
1756 if (unset) {
1757 callchain->record_mode = CALLCHAIN_NONE;
1758 pr_debug("callchain: disabled\n");
1759 return 0;
1760 }
1761
1762 ret = parse_callchain_record_opt(arg, callchain);
1763 if (!ret) {
1764 /* Enable data address sampling for DWARF unwind. */
1765 if (callchain->record_mode == CALLCHAIN_DWARF)
1766 record->sample_address = true;
1767 callchain_debug(callchain);
1768 }
1769
1770 return ret;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001771}
1772
Kan Liangc421e802015-07-29 05:42:12 -04001773int record_parse_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001774 const char *arg,
1775 int unset)
1776{
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001777 return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
Jiri Olsa26d33022012-08-07 15:20:47 +02001778}
1779
Kan Liangc421e802015-07-29 05:42:12 -04001780int record_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001781 const char *arg __maybe_unused,
1782 int unset __maybe_unused)
1783{
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001784 struct callchain_param *callchain = opt->value;
Kan Liangc421e802015-07-29 05:42:12 -04001785
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001786 callchain->enabled = true;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001787
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001788 if (callchain->record_mode == CALLCHAIN_NONE)
1789 callchain->record_mode = CALLCHAIN_FP;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001790
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001791 callchain_debug(callchain);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001792 return 0;
1793}
1794
Jiri Olsaeb853e82014-02-03 12:44:42 +01001795static int perf_record_config(const char *var, const char *value, void *cb)
1796{
Namhyung Kim7a29c082015-12-15 10:49:56 +09001797 struct record *rec = cb;
1798
1799 if (!strcmp(var, "record.build-id")) {
1800 if (!strcmp(value, "cache"))
1801 rec->no_buildid_cache = false;
1802 else if (!strcmp(value, "no-cache"))
1803 rec->no_buildid_cache = true;
1804 else if (!strcmp(value, "skip"))
1805 rec->no_buildid = true;
1806 else
1807 return -1;
1808 return 0;
1809 }
Yisheng Xiecff17202018-03-12 19:25:57 +08001810 if (!strcmp(var, "record.call-graph")) {
1811 var = "call-graph.record-mode";
1812 return perf_default_config(var, value, cb);
1813 }
Alexey Budankov93f20c02018-11-06 12:07:19 +03001814#ifdef HAVE_AIO_SUPPORT
1815 if (!strcmp(var, "record.aio")) {
1816 rec->opts.nr_cblocks = strtol(value, NULL, 0);
1817 if (!rec->opts.nr_cblocks)
1818 rec->opts.nr_cblocks = nr_cblocks_default;
1819 }
1820#endif
Jiri Olsaeb853e82014-02-03 12:44:42 +01001821
Yisheng Xiecff17202018-03-12 19:25:57 +08001822 return 0;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001823}
1824
/* One entry of the --clockid name -> clockid_t lookup table. */
struct clockid_map {
	const char *name;
	int clockid;
};

#define CLOCKID_MAP(n, c)	\
	{ .name = n, .clockid = (c), }

/* Sentinel terminating the clockids[] table below. */
#define CLOCKID_END	{ .name = NULL, }


/*
 * Add the missing ones, we need to build on many distros...
 * (older libcs may not export these, so fall back to the kernel's
 * numeric values)
 */
#ifndef CLOCK_MONOTONIC_RAW
#define CLOCK_MONOTONIC_RAW 4
#endif
#ifndef CLOCK_BOOTTIME
#define CLOCK_BOOTTIME 7
#endif
#ifndef CLOCK_TAI
#define CLOCK_TAI 11
#endif

/* Names accepted by -k/--clockid, looked up by parse_clockid(). */
static const struct clockid_map clockids[] = {
	/* available for all events, NMI safe */
	CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
	CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),

	/* available for some events */
	CLOCKID_MAP("realtime", CLOCK_REALTIME),
	CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
	CLOCKID_MAP("tai", CLOCK_TAI),

	/* available for the lazy (aliases of the entries above) */
	CLOCKID_MAP("mono", CLOCK_MONOTONIC),
	CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
	CLOCKID_MAP("real", CLOCK_REALTIME),
	CLOCKID_MAP("boot", CLOCK_BOOTTIME),

	CLOCKID_END,
};
1867
Alexey Budankovcf790512018-10-09 17:36:24 +03001868static int get_clockid_res(clockid_t clk_id, u64 *res_ns)
1869{
1870 struct timespec res;
1871
1872 *res_ns = 0;
1873 if (!clock_getres(clk_id, &res))
1874 *res_ns = res.tv_nsec + res.tv_sec * NSEC_PER_SEC;
1875 else
1876 pr_warning("WARNING: Failed to determine specified clock resolution.\n");
1877
1878 return 0;
1879}
1880
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001881static int parse_clockid(const struct option *opt, const char *str, int unset)
1882{
1883 struct record_opts *opts = (struct record_opts *)opt->value;
1884 const struct clockid_map *cm;
1885 const char *ostr = str;
1886
1887 if (unset) {
1888 opts->use_clockid = 0;
1889 return 0;
1890 }
1891
1892 /* no arg passed */
1893 if (!str)
1894 return 0;
1895
1896 /* no setting it twice */
1897 if (opts->use_clockid)
1898 return -1;
1899
1900 opts->use_clockid = true;
1901
1902 /* if its a number, we're done */
1903 if (sscanf(str, "%d", &opts->clockid) == 1)
Alexey Budankovcf790512018-10-09 17:36:24 +03001904 return get_clockid_res(opts->clockid, &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001905
1906 /* allow a "CLOCK_" prefix to the name */
1907 if (!strncasecmp(str, "CLOCK_", 6))
1908 str += 6;
1909
1910 for (cm = clockids; cm->name; cm++) {
1911 if (!strcasecmp(str, cm->name)) {
1912 opts->clockid = cm->clockid;
Alexey Budankovcf790512018-10-09 17:36:24 +03001913 return get_clockid_res(opts->clockid,
1914 &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001915 }
1916 }
1917
1918 opts->use_clockid = false;
1919 ui__warning("unknown clockid %s, check man page\n", ostr);
1920 return -1;
1921}
1922
Alexey Budankovf4fe11b2019-01-22 20:52:03 +03001923static int record__parse_affinity(const struct option *opt, const char *str, int unset)
1924{
1925 struct record_opts *opts = (struct record_opts *)opt->value;
1926
1927 if (unset || !str)
1928 return 0;
1929
1930 if (!strcasecmp(str, "node"))
1931 opts->affinity = PERF_AFFINITY_NODE;
1932 else if (!strcasecmp(str, "cpu"))
1933 opts->affinity = PERF_AFFINITY_CPU;
1934
1935 return 0;
1936}
1937
Adrian Huntere9db1312015-04-09 18:53:46 +03001938static int record__parse_mmap_pages(const struct option *opt,
1939 const char *str,
1940 int unset __maybe_unused)
1941{
1942 struct record_opts *opts = opt->value;
1943 char *s, *p;
1944 unsigned int mmap_pages;
1945 int ret;
1946
1947 if (!str)
1948 return -EINVAL;
1949
1950 s = strdup(str);
1951 if (!s)
1952 return -ENOMEM;
1953
1954 p = strchr(s, ',');
1955 if (p)
1956 *p = '\0';
1957
1958 if (*s) {
1959 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
1960 if (ret)
1961 goto out_free;
1962 opts->mmap_pages = mmap_pages;
1963 }
1964
1965 if (!p) {
1966 ret = 0;
1967 goto out_free;
1968 }
1969
1970 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
1971 if (ret)
1972 goto out_free;
1973
1974 opts->auxtrace_mmap_pages = mmap_pages;
1975
1976out_free:
1977 free(s);
1978 return ret;
1979}
1980
Jiri Olsa0c582442017-01-09 10:51:59 +01001981static void switch_output_size_warn(struct record *rec)
1982{
1983 u64 wakeup_size = perf_evlist__mmap_size(rec->opts.mmap_pages);
1984 struct switch_output *s = &rec->switch_output;
1985
1986 wakeup_size /= 2;
1987
1988 if (s->size < wakeup_size) {
1989 char buf[100];
1990
1991 unit_number__scnprintf(buf, sizeof(buf), wakeup_size);
1992 pr_warning("WARNING: switch-output data size lower than "
1993 "wakeup kernel buffer size (%s) "
1994 "expect bigger perf.data sizes\n", buf);
1995 }
1996}
1997
/*
 * Interpret the --switch-output argument stored in rec->switch_output.str:
 * either the literal "signal" (rotate on SIGUSR2), a size threshold
 * (B/K/M/G suffix) or a time threshold (s/m/h/d suffix).  Any accepted
 * form forces timestamped output file names and marks the feature
 * enabled.  Returns 0 when parsing succeeded or nothing was requested,
 * -1 when the value matches none of the accepted forms.
 */
static int switch_output_setup(struct record *rec)
{
	struct switch_output *s = &rec->switch_output;
	/* suffix multipliers for a size threshold, e.g. "100M" */
	static struct parse_tag tags_size[] = {
		{ .tag = 'B', .mult = 1 },
		{ .tag = 'K', .mult = 1 << 10 },
		{ .tag = 'M', .mult = 1 << 20 },
		{ .tag = 'G', .mult = 1 << 30 },
		{ .tag = 0 },
	};
	/* suffix multipliers for a time threshold, e.g. "30s" */
	static struct parse_tag tags_time[] = {
		{ .tag = 's', .mult = 1 },
		{ .tag = 'm', .mult = 60 },
		{ .tag = 'h', .mult = 60*60 },
		{ .tag = 'd', .mult = 60*60*24 },
		{ .tag = 0 },
	};
	unsigned long val;

	/* --switch-output not given at all */
	if (!s->set)
		return 0;

	if (!strcmp(s->str, "signal")) {
		s->signal = true;
		pr_debug("switch-output with SIGUSR2 signal\n");
		goto enabled;
	}

	/* parse_tag_value() returns (unsigned long) -1 on no match */
	val = parse_tag_value(s->str, tags_size);
	if (val != (unsigned long) -1) {
		s->size = val;
		pr_debug("switch-output with %s size threshold\n", s->str);
		goto enabled;
	}

	val = parse_tag_value(s->str, tags_time);
	if (val != (unsigned long) -1) {
		s->time = val;
		pr_debug("switch-output with %s time threshold (%lu seconds)\n",
			 s->str, s->time);
		goto enabled;
	}

	return -1;

enabled:
	rec->timestamp_filename = true;
	s->enabled = true;

	/* a size threshold below the wakeup watermark deserves a warning */
	if (s->size && !rec->opts.no_buffering)
		switch_output_size_warn(rec);

	return 0;
}
2052
/* Usage strings shown by "perf record -h"; NULL-terminated. */
static const char * const __record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};
/* Non-static alias so other builtins can reference the usage text. */
const char * const *record_usage = __record_usage;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002059
/*
 * XXX Ideally would be local to cmd_record() and passed to a record__new
 * because we need to have access to it in record__exit, that is called
 * after cmd_record() exits, but since record_options need to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't ouch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct record record = {
	.opts = {
		.sample_time	     = true,
		.mmap_pages	     = UINT_MAX,	/* UINT_MAX/ULLONG_MAX: presumably "unset" sentinels — confirm against option handling */
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,		/* default sampling frequency, Hz */
		.target		     = {
			.uses_mmap   = true,
			.default_per_cpu = true,
		},
		.mmap_flush          = MMAP_FLUSH_DEFAULT,
	},
	/* perf_tool callbacks used when processing synthesized/recorded events */
	.tool = {
		.sample		= process_sample_event,
		.fork		= perf_event__process_fork,
		.exit		= perf_event__process_exit,
		.comm		= perf_event__process_comm,
		.namespaces	= perf_event__process_namespaces,
		.mmap		= perf_event__process_mmap,
		.mmap2		= perf_event__process_mmap2,
		.ordered_events	= true,
	},
};
Frederic Weisbecker7865e812010-04-14 19:42:07 +02002094
/* Help text for the --call-graph option (frame pointers by default). */
const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
	"\n\t\t\t\tDefault: fp";

/* NOTE(review): presumably set by a --dry-run option outside this chunk — verify. */
static bool dry_run;
2099
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002100/*
2101 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
2102 * with it and switch to use the library functions in perf_evlist that came
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03002103 * from builtin-record.c, i.e. use record_opts,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002104 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
2105 * using pipes, etc.
2106 */
Jiri Olsaefd21302017-01-03 09:19:55 +01002107static struct option __record_options[] = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002108 OPT_CALLBACK('e', "event", &record.evlist, "event",
Thomas Gleixner86847b62009-06-06 12:24:17 +02002109 "event selector. use 'perf list' to list available events",
Jiri Olsaf120f9d2011-07-14 11:25:32 +02002110 parse_events_option),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002111 OPT_CALLBACK(0, "filter", &record.evlist, "filter",
Li Zefanc171b552009-10-15 11:22:07 +08002112 "event filter", parse_filter),
Wang Nan4ba1faa2015-07-10 07:36:10 +00002113 OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
2114 NULL, "don't record events from perf itself",
2115 exclude_perf),
Namhyung Kimbea03402012-04-26 14:15:15 +09002116 OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03002117 "record events on existing process id"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002118 OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03002119 "record events on existing thread id"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002120 OPT_INTEGER('r', "realtime", &record.realtime_prio,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002121 "collect data with this RT SCHED_FIFO priority"),
Arnaldo Carvalho de Melo509051e2014-01-14 17:52:14 -03002122 OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
Kirill Smelkovacac03f2011-01-12 17:59:36 +03002123 "collect data without buffering"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002124 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
Frederic Weisbeckerdaac07b2009-08-13 10:27:19 +02002125 "collect raw sample records from all opened counters"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002126 OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002127 "system-wide collection from all CPUs"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002128 OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
Stephane Eranianc45c6ea2010-05-28 12:00:01 +02002129 "list of cpus to monitor"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002130 OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
Jiri Olsa2d4f2792019-02-21 10:41:30 +01002131 OPT_STRING('o', "output", &record.data.path, "file",
Ingo Molnarabaff322009-06-02 22:59:57 +02002132 "output file name"),
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02002133 OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
2134 &record.opts.no_inherit_set,
2135 "child tasks do not inherit counters"),
Wang Nan4ea648a2016-07-14 08:34:47 +00002136 OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
2137 "synthesize non-sample events at the end of output"),
Wang Nan626a6b72016-07-14 08:34:45 +00002138 OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
Song Liu71184c62019-03-11 22:30:37 -07002139 OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "record bpf events"),
Arnaldo Carvalho de Melob09c2362018-03-01 14:52:50 -03002140 OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
2141 "Fail if the specified frequency can't be used"),
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03002142 OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
2143 "profile at this frequency",
2144 record__parse_freq),
Adrian Huntere9db1312015-04-09 18:53:46 +03002145 OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
2146 "number of mmap data pages and AUX area tracing mmap pages",
2147 record__parse_mmap_pages),
Alexey Budankov470530b2019-03-18 20:40:26 +03002148 OPT_CALLBACK(0, "mmap-flush", &record.opts, "number",
2149 "Minimal number of bytes that is extracted from mmap data pages (default: 1)",
2150 record__mmap_flush_parse),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002151 OPT_BOOLEAN(0, "group", &record.opts.group,
Lin Ming43bece72011-08-17 18:42:07 +08002152 "put the counters into a counter group"),
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03002153 OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002154 NULL, "enables call-graph recording" ,
2155 &record_callchain_opt),
2156 OPT_CALLBACK(0, "call-graph", &record.opts,
Namhyung Kim76a26542015-10-22 23:28:32 +09002157 "record_mode[,record_size]", record_callchain_help,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002158 &record_parse_callchain_opt),
Ian Munsiec0555642010-04-13 18:37:33 +10002159 OPT_INCR('v', "verbose", &verbose,
Ingo Molnar3da297a2009-06-07 17:39:02 +02002160 "be more verbose (show counter open errors, etc)"),
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02002161 OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002162 OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02002163 "per thread counts"),
Peter Zijlstra56100322015-06-10 16:48:50 +02002164 OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
Kan Liang3b0a5da2017-08-29 13:11:08 -04002165 OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
2166 "Record the sample physical addresses"),
Jiri Olsab6f35ed2016-08-01 20:02:35 +02002167 OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
Adrian Hunter3abebc52015-07-06 14:51:01 +03002168 OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
2169 &record.opts.sample_time_set,
2170 "Record the sample timestamps"),
Jiri Olsaf290aa12018-02-01 09:38:11 +01002171 OPT_BOOLEAN_SET('P', "period", &record.opts.period, &record.opts.period_set,
2172 "Record the sample period"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002173 OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02002174 "don't sample"),
Wang Nand2db9a92016-01-25 09:56:19 +00002175 OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
2176 &record.no_buildid_cache_set,
2177 "do not update the buildid cache"),
2178 OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
2179 &record.no_buildid_set,
2180 "do not collect buildids in perf.data"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002181 OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
Stephane Eranian023695d2011-02-14 11:20:01 +02002182 "monitor event in cgroup name only",
2183 parse_cgroups),
Arnaldo Carvalho de Meloa6205a32014-01-14 17:58:12 -03002184 OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
Andi Kleen6619a532014-01-11 13:38:27 -08002185 "ms to wait before starting measurement after program start"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002186 OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
2187 "user to profile"),
Stephane Eraniana5aabda2012-03-08 23:47:45 +01002188
2189 OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
2190 "branch any", "sample any taken branches",
2191 parse_branch_stack),
2192
2193 OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
2194 "branch filter mask", "branch stack filter modes",
Roberto Agostino Vitillobdfebd82012-02-09 23:21:02 +01002195 parse_branch_stack),
Andi Kleen05484292013-01-24 16:10:29 +01002196 OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
2197 "sample by weight (on special events only)"),
Andi Kleen475eeab2013-09-20 07:40:43 -07002198 OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
2199 "sample transaction flags (special events only)"),
Adrian Hunter3aa59392013-11-15 15:52:29 +02002200 OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
2201 "use per-thread mmaps"),
Stephane Eranianbcc84ec2015-08-31 18:41:12 +02002202 OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
2203 "sample selected machine registers on interrupt,"
Kan Liangaeea9062019-05-14 13:19:32 -07002204 " use '-I?' to list register names", parse_intr_regs),
Andi Kleen84c41742017-09-05 10:00:28 -07002205 OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
2206 "sample selected machine registers on interrupt,"
Kan Liangaeea9062019-05-14 13:19:32 -07002207 " use '--user-regs=?' to list register names", parse_user_regs),
Andi Kleen85c273d2015-02-24 15:13:40 -08002208 OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
2209 "Record running/enabled time of read (:S) events"),
Peter Zijlstra814c8c32015-03-31 00:19:31 +02002210 OPT_CALLBACK('k', "clockid", &record.opts,
2211 "clockid", "clockid to use for events, see clock_gettime()",
2212 parse_clockid),
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002213 OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
2214 "opts", "AUX area tracing Snapshot Mode", ""),
Mark Drayton3fcb10e2018-12-04 12:34:20 -08002215 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
Kan Liang9d9cad72015-06-17 09:51:11 -04002216 "per thread proc mmap processing timeout in ms"),
Hari Bathinif3b36142017-03-08 02:11:43 +05302217 OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
2218 "Record namespaces events"),
Adrian Hunterb757bb02015-07-21 12:44:04 +03002219 OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
2220 "Record context switch events"),
Jiri Olsa85723882016-02-15 09:34:31 +01002221 OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
2222 "Configure all used events to run in kernel space.",
2223 PARSE_OPT_EXCLUSIVE),
2224 OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
2225 "Configure all used events to run in user space.",
2226 PARSE_OPT_EXCLUSIVE),
yuzhoujian53651b22019-05-30 14:29:22 +01002227 OPT_BOOLEAN(0, "kernel-callchains", &record.opts.kernel_callchains,
2228 "collect kernel callchains"),
2229 OPT_BOOLEAN(0, "user-callchains", &record.opts.user_callchains,
2230 "collect user callchains"),
Wang Nan71dc23262015-10-14 12:41:19 +00002231 OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
2232 "clang binary to use for compiling BPF scriptlets"),
2233 OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
2234 "options passed to clang when compiling BPF scriptlets"),
He Kuang7efe0e02015-12-14 10:39:23 +00002235 OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
2236 "file", "vmlinux pathname"),
Namhyung Kim61566812016-01-11 22:37:09 +09002237 OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
2238 "Record build-id of all DSOs regardless of hits"),
Wang Nanecfd7a92016-04-13 08:21:07 +00002239 OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
2240 "append timestamp to output filename"),
Jin Yao68588ba2017-12-08 21:13:42 +08002241 OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
2242 "Record timestamp boundary (time of first/last samples)"),
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002243 OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
Andi Kleenc38dab72019-03-14 15:49:56 -07002244 &record.switch_output.set, "signal or size[BKMG] or time[smhd]",
2245 "Switch output when receiving SIGUSR2 (signal) or cross a size or time threshold",
Jiri Olsadc0c6122017-01-09 10:51:58 +01002246 "signal"),
Andi Kleen03724b22019-03-14 15:49:55 -07002247 OPT_INTEGER(0, "switch-max-files", &record.switch_output.num_files,
2248 "Limit number of switch output generated files"),
Wang Nan0aab2132016-06-16 08:02:41 +00002249 OPT_BOOLEAN(0, "dry-run", &dry_run,
2250 "Parse options then exit"),
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002251#ifdef HAVE_AIO_SUPPORT
Alexey Budankov93f20c02018-11-06 12:07:19 +03002252 OPT_CALLBACK_OPTARG(0, "aio", &record.opts,
2253 &nr_cblocks_default, "n", "Use <n> control blocks in asynchronous trace writing mode (default: 1, max: 4)",
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002254 record__aio_parse),
2255#endif
Alexey Budankovf4fe11b2019-01-22 20:52:03 +03002256 OPT_CALLBACK(0, "affinity", &record.opts, "node|cpu",
2257 "Set affinity mask of trace reading thread to NUMA node cpu mask or cpu of processed mmap buffer",
2258 record__parse_affinity),
Alexey Budankov504c1ad2019-03-18 20:44:42 +03002259#ifdef HAVE_ZSTD_SUPPORT
2260 OPT_CALLBACK_OPTARG('z', "compression-level", &record.opts, &comp_level_default,
2261 "n", "Compressed records using specified level (default: 1 - fastest compression, 22 - greatest compression)",
2262 record__parse_comp_level),
2263#endif
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002264 OPT_END()
2265};
2266
/*
 * Mutable alias for the option table so that cmd_record() can patch
 * entries at runtime (see the set_nobuild()/set_option_nobuild() calls
 * in cmd_record() that disable options whose support was compiled out).
 */
struct option *record_options = __record_options;
2268
Arnaldo Carvalho de Melob0ad8ea2017-03-27 11:47:20 -03002269int cmd_record(int argc, const char **argv)
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002270{
Adrian Hunteref149c22015-04-09 18:53:45 +03002271 int err;
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03002272 struct record *rec = &record;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002273 char errbuf[BUFSIZ];
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002274
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03002275 setlocale(LC_ALL, "");
2276
Wang Nan48e1cab2015-12-14 10:39:22 +00002277#ifndef HAVE_LIBBPF_SUPPORT
2278# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
2279 set_nobuild('\0', "clang-path", true);
2280 set_nobuild('\0', "clang-opt", true);
2281# undef set_nobuild
2282#endif
2283
He Kuang7efe0e02015-12-14 10:39:23 +00002284#ifndef HAVE_BPF_PROLOGUE
2285# if !defined (HAVE_DWARF_SUPPORT)
2286# define REASON "NO_DWARF=1"
2287# elif !defined (HAVE_LIBBPF_SUPPORT)
2288# define REASON "NO_LIBBPF=1"
2289# else
2290# define REASON "this architecture doesn't support BPF prologue"
2291# endif
2292# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
2293 set_nobuild('\0', "vmlinux", true);
2294# undef set_nobuild
2295# undef REASON
2296#endif
2297
Alexey Budankov9d2ed642019-01-22 20:47:43 +03002298 CPU_ZERO(&rec->affinity_mask);
2299 rec->opts.affinity = PERF_AFFINITY_SYS;
2300
Jiri Olsa0f98b112019-07-21 13:23:55 +02002301 rec->evlist = evlist__new();
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03002302 if (rec->evlist == NULL)
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -02002303 return -ENOMEM;
2304
Arnaldo Carvalho de Meloecc4c562017-01-24 13:44:10 -03002305 err = perf_config(perf_record_config, rec);
2306 if (err)
2307 return err;
Jiri Olsaeb853e82014-02-03 12:44:42 +01002308
Tom Zanussibca647a2010-11-10 08:11:30 -06002309 argc = parse_options(argc, argv, record_options, record_usage,
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02002310 PARSE_OPT_STOP_AT_NON_OPTION);
Namhyung Kim68ba3232017-02-17 17:17:42 +09002311 if (quiet)
2312 perf_quiet_option();
Jiri Olsa483635a2017-02-17 18:00:18 +01002313
2314 /* Make system wide (-a) the default target. */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002315 if (!argc && target__none(&rec->opts.target))
Jiri Olsa483635a2017-02-17 18:00:18 +01002316 rec->opts.target.system_wide = true;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002317
Namhyung Kimbea03402012-04-26 14:15:15 +09002318 if (nr_cgroups && !rec->opts.target.system_wide) {
Namhyung Kimc7118362015-10-25 00:49:27 +09002319 usage_with_options_msg(record_usage, record_options,
2320 "cgroup monitoring only available in system-wide mode");
2321
Stephane Eranian023695d2011-02-14 11:20:01 +02002322 }
Alexey Budankov504c1ad2019-03-18 20:44:42 +03002323
2324 if (rec->opts.comp_level != 0) {
2325 pr_debug("Compression enabled, disabling build id collection at the end of the session.\n");
2326 rec->no_buildid = true;
2327 }
2328
Adrian Hunterb757bb02015-07-21 12:44:04 +03002329 if (rec->opts.record_switch_events &&
2330 !perf_can_record_switch_events()) {
Namhyung Kimc7118362015-10-25 00:49:27 +09002331 ui__error("kernel does not support recording context switch events\n");
2332 parse_options_usage(record_usage, record_options, "switch-events", 0);
2333 return -EINVAL;
Adrian Hunterb757bb02015-07-21 12:44:04 +03002334 }
Stephane Eranian023695d2011-02-14 11:20:01 +02002335
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002336 if (switch_output_setup(rec)) {
2337 parse_options_usage(record_usage, record_options, "switch-output", 0);
2338 return -EINVAL;
2339 }
2340
Jiri Olsabfacbe32017-01-09 10:52:00 +01002341 if (rec->switch_output.time) {
2342 signal(SIGALRM, alarm_sig_handler);
2343 alarm(rec->switch_output.time);
2344 }
2345
Andi Kleen03724b22019-03-14 15:49:55 -07002346 if (rec->switch_output.num_files) {
2347 rec->switch_output.filenames = calloc(sizeof(char *),
2348 rec->switch_output.num_files);
2349 if (!rec->switch_output.filenames)
2350 return -EINVAL;
2351 }
2352
Adrian Hunter1b36c032016-09-23 17:38:39 +03002353 /*
2354 * Allow aliases to facilitate the lookup of symbols for address
2355 * filters. Refer to auxtrace_parse_filters().
2356 */
2357 symbol_conf.allow_aliases = true;
2358
2359 symbol__init(NULL);
2360
Adrian Hunter4b5ea3b2018-03-06 11:13:12 +02002361 err = record__auxtrace_init(rec);
Adrian Hunter1b36c032016-09-23 17:38:39 +03002362 if (err)
2363 goto out;
2364
Wang Nan0aab2132016-06-16 08:02:41 +00002365 if (dry_run)
Adrian Hunter5c01ad602016-09-23 17:38:37 +03002366 goto out;
Wang Nan0aab2132016-06-16 08:02:41 +00002367
Wang Nand7888572016-04-08 15:07:24 +00002368 err = bpf__setup_stdout(rec->evlist);
2369 if (err) {
2370 bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
2371 pr_err("ERROR: Setup BPF stdout failed: %s\n",
2372 errbuf);
Adrian Hunter5c01ad602016-09-23 17:38:37 +03002373 goto out;
Wang Nand7888572016-04-08 15:07:24 +00002374 }
2375
Adrian Hunteref149c22015-04-09 18:53:45 +03002376 err = -ENOMEM;
2377
Wang Nan0c1d46a2016-04-20 18:59:52 +00002378 if (rec->no_buildid_cache || rec->no_buildid) {
Stephane Eraniana1ac1d32010-06-17 11:39:01 +02002379 disable_buildid_cache();
Jiri Olsadc0c6122017-01-09 10:51:58 +01002380 } else if (rec->switch_output.enabled) {
Wang Nan0c1d46a2016-04-20 18:59:52 +00002381 /*
2382 * In 'perf record --switch-output', disable buildid
2383 * generation by default to reduce data file switching
2384 * overhead. Still generate buildid if they are required
2385 * explicitly using
2386 *
Jiri Olsa60437ac2017-01-03 09:19:56 +01002387 * perf record --switch-output --no-no-buildid \
Wang Nan0c1d46a2016-04-20 18:59:52 +00002388 * --no-no-buildid-cache
2389 *
2390 * Following code equals to:
2391 *
2392 * if ((rec->no_buildid || !rec->no_buildid_set) &&
2393 * (rec->no_buildid_cache || !rec->no_buildid_cache_set))
2394 * disable_buildid_cache();
2395 */
2396 bool disable = true;
2397
2398 if (rec->no_buildid_set && !rec->no_buildid)
2399 disable = false;
2400 if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
2401 disable = false;
2402 if (disable) {
2403 rec->no_buildid = true;
2404 rec->no_buildid_cache = true;
2405 disable_buildid_cache();
2406 }
2407 }
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02002408
Wang Nan4ea648a2016-07-14 08:34:47 +00002409 if (record.opts.overwrite)
2410 record.opts.tail_synthesize = true;
2411
Jiri Olsa6484d2f2019-07-21 13:24:28 +02002412 if (rec->evlist->core.nr_entries == 0 &&
Arnaldo Carvalho de Melo4b4cd502017-07-03 13:26:32 -03002413 __perf_evlist__add_default(rec->evlist, !record.opts.no_samples) < 0) {
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02002414 pr_err("Not enough memory for event selector list\n");
Adrian Hunter394c01e2016-09-23 17:38:36 +03002415 goto out;
Peter Zijlstrabbd36e52009-06-11 23:11:50 +02002416 }
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002417
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02002418 if (rec->opts.target.tid && !rec->opts.no_inherit_set)
2419 rec->opts.no_inherit = true;
2420
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002421 err = target__validate(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002422 if (err) {
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002423 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Jiri Olsac3dec272018-02-06 19:17:58 +01002424 ui__warning("%s\n", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002425 }
Namhyung Kim4bd0f2d2012-04-26 14:15:18 +09002426
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002427 err = target__parse_uid(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002428 if (err) {
2429 int saved_errno = errno;
2430
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002431 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Namhyung Kim3780f482012-05-29 13:22:57 +09002432 ui__error("%s", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002433
2434 err = -saved_errno;
Adrian Hunter394c01e2016-09-23 17:38:36 +03002435 goto out;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002436 }
Arnaldo Carvalho de Melo0d37aa32012-01-19 14:08:15 -02002437
Mengting Zhangca800062017-12-13 15:01:53 +08002438 /* Enable ignoring missing threads when -u/-p option is defined. */
2439 rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;
Jiri Olsa23dc4f12016-12-12 11:35:43 +01002440
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002441 err = -ENOMEM;
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03002442 if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
Arnaldo Carvalho de Melodd7927f2011-01-12 14:28:51 -02002443 usage_with_options(record_usage, record_options);
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02002444
Adrian Hunteref149c22015-04-09 18:53:45 +03002445 err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
2446 if (err)
Adrian Hunter394c01e2016-09-23 17:38:36 +03002447 goto out;
Adrian Hunteref149c22015-04-09 18:53:45 +03002448
Namhyung Kim61566812016-01-11 22:37:09 +09002449 /*
2450 * We take all buildids when the file contains
2451 * AUX area tracing data because we do not decode the
2452 * trace because it would take too long.
2453 */
2454 if (rec->opts.full_auxtrace)
2455 rec->buildid_all = true;
2456
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03002457 if (record_opts__config(&rec->opts)) {
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03002458 err = -EINVAL;
Adrian Hunter394c01e2016-09-23 17:38:36 +03002459 goto out;
Mike Galbraith7e4ff9e2009-10-12 07:56:03 +02002460 }
2461
Alexey Budankov93f20c02018-11-06 12:07:19 +03002462 if (rec->opts.nr_cblocks > nr_cblocks_max)
2463 rec->opts.nr_cblocks = nr_cblocks_max;
Alexey Budankov5d7f4112019-03-18 20:43:35 +03002464 pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks);
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002465
Alexey Budankov9d2ed642019-01-22 20:47:43 +03002466 pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
Alexey Budankov470530b2019-03-18 20:40:26 +03002467 pr_debug("mmap flush: %d\n", rec->opts.mmap_flush);
Alexey Budankov9d2ed642019-01-22 20:47:43 +03002468
Alexey Budankov51255a82019-03-18 20:42:19 +03002469 if (rec->opts.comp_level > comp_level_max)
2470 rec->opts.comp_level = comp_level_max;
2471 pr_debug("comp level: %d\n", rec->opts.comp_level);
2472
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002473 err = __cmd_record(&record, argc, argv);
Adrian Hunter394c01e2016-09-23 17:38:36 +03002474out:
Jiri Olsac12995a2019-07-21 13:23:56 +02002475 evlist__delete(rec->evlist);
Arnaldo Carvalho de Melod65a4582010-07-30 18:31:28 -03002476 symbol__exit();
Adrian Hunteref149c22015-04-09 18:53:45 +03002477 auxtrace_record__free(rec->itr);
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03002478 return err;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002479}
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002480
2481static void snapshot_sig_handler(int sig __maybe_unused)
2482{
Jiri Olsadc0c6122017-01-09 10:51:58 +01002483 struct record *rec = &record;
2484
Wang Nan5f9cf592016-04-20 18:59:49 +00002485 if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
2486 trigger_hit(&auxtrace_snapshot_trigger);
2487 auxtrace_record__snapshot_started = 1;
2488 if (auxtrace_record__snapshot_start(record.itr))
2489 trigger_error(&auxtrace_snapshot_trigger);
2490 }
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002491
Jiri Olsadc0c6122017-01-09 10:51:58 +01002492 if (switch_output_signal(rec))
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002493 trigger_hit(&switch_output_trigger);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002494}
Jiri Olsabfacbe32017-01-09 10:52:00 +01002495
2496static void alarm_sig_handler(int sig __maybe_unused)
2497{
2498 struct record *rec = &record;
2499
2500 if (switch_output_time(rec))
2501 trigger_hit(&switch_output_trigger);
2502}