blob: 3f66a49a997fdd2404f3c02abe4d20d42dc54673 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Ingo Molnarabaff322009-06-02 22:59:57 +02002/*
Ingo Molnarbf9e1872009-06-02 23:37:05 +02003 * builtin-record.c
4 *
5 * Builtin record command: Record the profile of a workload
6 * (or a CPU, or a PID) into the perf.data output file - for
7 * later analysis via perf report.
Ingo Molnarabaff322009-06-02 22:59:57 +02008 */
Ingo Molnar16f762a2009-05-27 09:10:38 +02009#include "builtin.h"
Ingo Molnarbf9e1872009-06-02 23:37:05 +020010
Arnaldo Carvalho de Melo6122e4e2010-02-03 16:52:05 -020011#include "util/build-id.h"
Josh Poimboeuf4b6ab942015-12-15 09:39:39 -060012#include <subcmd/parse-options.h>
Ingo Molnar8ad8db32009-05-26 11:10:09 +020013#include "util/parse-events.h"
Taeung Song41840d22016-06-23 17:55:17 +090014#include "util/config.h"
Thomas Gleixner6eda5832009-05-01 18:29:57 +020015
Arnaldo Carvalho de Melo8f651ea2014-10-09 16:12:24 -030016#include "util/callchain.h"
Arnaldo Carvalho de Melof14d5702014-10-17 12:17:40 -030017#include "util/cgroup.h"
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020018#include "util/header.h"
Frederic Weisbecker66e274f2009-08-12 11:07:25 +020019#include "util/event.h"
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -020020#include "util/evlist.h"
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -020021#include "util/evsel.h"
Frederic Weisbecker8f288272009-08-16 22:05:48 +020022#include "util/debug.h"
Arnaldo Carvalho de Meloaeb00b12019-08-22 15:40:29 -030023#include "util/target.h"
Arnaldo Carvalho de Melo94c744b2009-12-11 21:24:02 -020024#include "util/session.h"
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -020025#include "util/tool.h"
Arnaldo Carvalho de Melo8d063672009-11-04 18:50:43 -020026#include "util/symbol.h"
Arnaldo Carvalho de Meloaeb00b12019-08-22 15:40:29 -030027#include "util/record.h"
Paul Mackerrasa12b51c2010-03-10 20:36:09 +110028#include "util/cpumap.h"
Arnaldo Carvalho de Melofd782602011-01-18 15:15:24 -020029#include "util/thread_map.h"
Jiri Olsaf5fc14122013-10-15 16:27:32 +020030#include "util/data.h"
Stephane Eranianbcc84ec2015-08-31 18:41:12 +020031#include "util/perf_regs.h"
Adrian Hunteref149c22015-04-09 18:53:45 +030032#include "util/auxtrace.h"
Adrian Hunter46bc29b2016-03-08 10:38:44 +020033#include "util/tsc.h"
Andi Kleenf00898f2015-05-27 10:51:51 -070034#include "util/parse-branch-options.h"
Stephane Eranianbcc84ec2015-08-31 18:41:12 +020035#include "util/parse-regs-options.h"
Wang Nan71dc23262015-10-14 12:41:19 +000036#include "util/llvm-utils.h"
Wang Nan8690a2a2016-02-22 09:10:32 +000037#include "util/bpf-loader.h"
Wang Nan5f9cf592016-04-20 18:59:49 +000038#include "util/trigger.h"
Wang Nana0748652016-11-26 07:03:28 +000039#include "util/perf-hooks.h"
Alexey Budankovf13de662019-01-22 20:50:57 +030040#include "util/cpu-set-sched.h"
Arnaldo Carvalho de Meloea49e012019-09-18 11:36:13 -030041#include "util/synthetic-events.h"
Arnaldo Carvalho de Meloc5e40272017-04-19 16:12:39 -030042#include "util/time-utils.h"
Arnaldo Carvalho de Melo58db1d62017-04-19 16:05:56 -030043#include "util/units.h"
Song Liu7b612e22019-01-17 08:15:19 -080044#include "util/bpf-event.h"
Wang Nand8871ea2016-02-26 09:32:06 +000045#include "asm/bug.h"
Arnaldo Carvalho de Meloc1a604d2019-08-29 15:20:59 -030046#include "perf.h"
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020047
Arnaldo Carvalho de Meloa43783a2017-04-18 10:46:11 -030048#include <errno.h>
Arnaldo Carvalho de Melofd20e812017-04-17 15:23:08 -030049#include <inttypes.h>
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -030050#include <locale.h>
Arnaldo Carvalho de Melo42087352017-04-19 19:06:30 -030051#include <poll.h>
Peter Zijlstra97124d5e2009-06-02 15:52:24 +020052#include <unistd.h>
Peter Zijlstrade9ac072009-04-08 15:01:31 +020053#include <sched.h>
Arnaldo Carvalho de Melo9607ad32017-04-19 15:49:18 -030054#include <signal.h>
Arnaldo Carvalho de Meloa41794c2010-05-18 18:29:23 -030055#include <sys/mman.h>
Arnaldo Carvalho de Melo42087352017-04-19 19:06:30 -030056#include <sys/wait.h>
Mamatha Inamdar6ef81c52019-08-22 12:50:49 +053057#include <linux/err.h>
Arnaldo Carvalho de Melo8520a982019-08-29 16:18:59 -030058#include <linux/string.h>
Arnaldo Carvalho de Melo0693e682016-08-08 15:05:46 -030059#include <linux/time64.h>
Arnaldo Carvalho de Melod8f9da22019-07-04 12:06:20 -030060#include <linux/zalloc.h>
Bernhard Rosenkraenzer78da39f2012-10-08 09:43:26 +030061
/*
 * Controls rotation of the perf.data output file: a new file can be
 * started on a signal, after a byte-size threshold, or periodically
 * by time (see the switch_output_*() predicates below).
 */
struct switch_output {
	bool		 enabled;	/* any switch-output mode requested */
	bool		 signal;	/* rotate when the switch-output trigger fires on a signal */
	unsigned long	 size;		/* rotate after this many bytes written, 0 = off */
	unsigned long	 time;		/* rotate on a time period, 0 = off */
	const char	*str;		/* raw option argument as given on the command line */
	bool		 set;		/* option was explicitly set */
	char		 **filenames;	/* presumably a ring of generated output names - used later in file */
	int		 num_files;	/* capacity of filenames[] */
	int		 cur_file;	/* index of the current output file */
};
73
/*
 * Per-invocation state of 'perf record': tool callbacks, parsed
 * options, the output data file, optional AUX-area tracing state,
 * and the evlist/session being recorded.
 */
struct record {
	struct perf_tool	tool;		/* event-processing callbacks */
	struct record_opts	opts;		/* parsed command-line options */
	u64			bytes_written;	/* payload bytes written; drives switch-output-by-size */
	struct perf_data	data;		/* the perf.data output */
	struct auxtrace_record	*itr;		/* AUX area tracing state, NULL when unused */
	struct evlist	*evlist;
	struct perf_session	*session;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_set;
	bool			no_buildid_cache;
	bool			no_buildid_cache_set;
	bool			buildid_all;	/* when set, skip per-sample DSO hit marking */
	bool			timestamp_filename;
	bool			timestamp_boundary;
	struct switch_output	switch_output;	/* output-file rotation control */
	unsigned long long	samples;
	cpu_set_t		affinity_mask;
};
Ingo Molnara21ca2c2009-06-06 09:58:57 +020094
/* Nonzero once an AUX area snapshot has been started (see snapshot exit path). */
static volatile int auxtrace_record__snapshot_started;
/* Trigger coordinating AUX area tracing snapshots. */
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
/* Trigger coordinating rotation of the perf.data output file. */
static DEFINE_TRIGGER(switch_output_trigger);

/* Printable names for the affinity modes, indexed up to PERF_AFFINITY_MAX. */
static const char *affinity_tags[PERF_AFFINITY_MAX] = {
	"SYS", "NODE", "CPU"
};
102
Jiri Olsadc0c6122017-01-09 10:51:58 +0100103static bool switch_output_signal(struct record *rec)
104{
105 return rec->switch_output.signal &&
106 trigger_is_ready(&switch_output_trigger);
107}
108
109static bool switch_output_size(struct record *rec)
110{
111 return rec->switch_output.size &&
112 trigger_is_ready(&switch_output_trigger) &&
113 (rec->bytes_written >= rec->switch_output.size);
114}
115
Jiri Olsabfacbe32017-01-09 10:52:00 +0100116static bool switch_output_time(struct record *rec)
117{
118 return rec->switch_output.time &&
119 trigger_is_ready(&switch_output_trigger);
120}
121
Jiri Olsaded2b8f2018-09-13 14:54:06 +0200122static int record__write(struct record *rec, struct perf_mmap *map __maybe_unused,
123 void *bf, size_t size)
Peter Zijlstraf5970552009-06-18 23:22:55 +0200124{
Jiri Olsaded2b8f2018-09-13 14:54:06 +0200125 struct perf_data_file *file = &rec->session->data->file;
126
127 if (perf_data_file__write(file, bf, size) < 0) {
Jiri Olsa50a9b862013-11-22 13:11:24 +0100128 pr_err("failed to write perf data, error: %m\n");
129 return -1;
Peter Zijlstraf5970552009-06-18 23:22:55 +0200130 }
David Ahern8d3eca22012-08-26 12:24:47 -0600131
Arnaldo Carvalho de Melocf8b2e62013-12-19 14:26:26 -0300132 rec->bytes_written += size;
Jiri Olsadc0c6122017-01-09 10:51:58 +0100133
134 if (switch_output_size(rec))
135 trigger_hit(&switch_output_trigger);
136
David Ahern8d3eca22012-08-26 12:24:47 -0600137 return 0;
Peter Zijlstraf5970552009-06-18 23:22:55 +0200138}
139
/* Forward declarations - definitions appear later in this file. */
static int record__aio_enabled(struct record *rec);
static int record__comp_enabled(struct record *rec);
static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
			    void *src, size_t src_size);
144
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300145#ifdef HAVE_AIO_SUPPORT
/*
 * Queue an asynchronous write of @size bytes from @buf to @trace_fd at
 * offset @off, retrying while the kernel reports EAGAIN. On any other
 * failure the cblock is marked free (aio_fildes = -1). Returns the
 * final aio_write() result (0 on success).
 */
static int record__aio_write(struct aiocb *cblock, int trace_fd,
			     void *buf, size_t size, off_t off)
{
	int rc;

	cblock->aio_fildes = trace_fd;
	cblock->aio_buf = buf;
	cblock->aio_nbytes = size;
	cblock->aio_offset = off;
	cblock->aio_sigevent.sigev_notify = SIGEV_NONE; /* no completion notification; polled instead */

	for (;;) {
		rc = aio_write(cblock);
		if (rc == 0)
			break;
		if (errno != EAGAIN) {
			cblock->aio_fildes = -1;
			pr_err("failed to queue perf data, error: %m\n");
			break;
		}
		/* EAGAIN: kernel aio queue full, retry */
	}

	return rc;
}
170
/*
 * Poll one in-flight aio write on @md. Returns 0 if the write is still
 * in progress (or was restarted with the unwritten remainder), and 1
 * when it fully completed and the cblock was released.
 */
static int record__aio_complete(struct perf_mmap *md, struct aiocb *cblock)
{
	void *rem_buf;
	off_t rem_off;
	size_t rem_size;
	int rc, aio_errno;
	ssize_t aio_ret, written;

	aio_errno = aio_error(cblock);
	if (aio_errno == EINPROGRESS)
		return 0;

	written = aio_ret = aio_return(cblock);
	if (aio_ret < 0) {
		if (aio_errno != EINTR)
			pr_err("failed to write perf data, error: %m\n");
		/* treat a failed/interrupted write as zero bytes written so it restarts below */
		written = 0;
	}

	rem_size = cblock->aio_nbytes - written;

	if (rem_size == 0) {
		/* whole chunk written: mark the cblock free for reuse */
		cblock->aio_fildes = -1;
		/*
		 * md->refcount is incremented in record__aio_pushfn() for
		 * every aio write request started in record__aio_push() so
		 * decrement it because the request is now complete.
		 */
		perf_mmap__put(md);
		rc = 1;
	} else {
		/*
		 * aio write request may require restart with the
		 * remainder if the kernel didn't write the whole
		 * chunk at once.
		 */
		rem_off = cblock->aio_offset + written;
		rem_buf = (void *)(cblock->aio_buf + written);
		record__aio_write(cblock, cblock->aio_fildes,
				rem_buf, rem_size, rem_off);
		rc = 0;
	}

	return rc;
}
216
/*
 * Wait for aio writes on @md's cblocks. With sync_all == false, return
 * the index of the first free (or just-completed) cblock as soon as one
 * is available. With sync_all == true, block until every outstanding
 * write has completed and return -1.
 */
static int record__aio_sync(struct perf_mmap *md, bool sync_all)
{
	struct aiocb **aiocb = md->aio.aiocb;
	struct aiocb *cblocks = md->aio.cblocks;
	struct timespec timeout = { 0, 1000 * 1000 * 1 }; /* 1ms */
	int i, do_suspend;

	do {
		do_suspend = 0;
		for (i = 0; i < md->aio.nr_cblocks; ++i) {
			/* aio_fildes == -1 marks a free cblock */
			if (cblocks[i].aio_fildes == -1 || record__aio_complete(md, &cblocks[i])) {
				if (sync_all)
					aiocb[i] = NULL;
				else
					return i;
			} else {
				/*
				 * Started aio write is not complete yet
				 * so it has to be waited before the
				 * next allocation.
				 */
				aiocb[i] = &cblocks[i];
				do_suspend = 1;
			}
		}
		if (!do_suspend)
			return -1;

		/* wait (up to 1ms per iteration) for any tracked request to finish */
		while (aio_suspend((const struct aiocb **)aiocb, md->aio.nr_cblocks, &timeout)) {
			if (!(errno == EAGAIN || errno == EINTR))
				pr_err("failed to sync perf data, error: %m\n");
		}
	} while (1);
}
251
/* Context threaded through perf_mmap__push() into record__aio_pushfn(). */
struct record_aio {
	struct record	*rec;	/* owning record session */
	void		*data;	/* staging buffer (map->aio.data[idx]) */
	size_t		size;	/* bytes staged into data so far */
};
257
/*
 * perf_mmap__push() callback: stage (and optionally zstd-compress) one
 * chunk of ring-buffer data into the aio staging buffer described by
 * @to. Returns the number of bytes appended to the staging buffer.
 */
static int record__aio_pushfn(struct perf_mmap *map, void *to, void *buf, size_t size)
{
	struct record_aio *aio = to;

	/*
	 * map->base data pointed by buf is copied into free map->aio.data[] buffer
	 * to release space in the kernel buffer as fast as possible, calling
	 * perf_mmap__consume() from perf_mmap__push() function.
	 *
	 * That lets the kernel proceed with storing more profiling data into
	 * the kernel buffer earlier than other per-cpu kernel buffers are handled.
	 *
	 * Copying can be done in two steps in case the chunk of profiling data
	 * crosses the upper bound of the kernel buffer. In this case we first move
	 * part of data from map->start till the upper bound and then the remainder
	 * from the beginning of the kernel buffer till the end of the data chunk.
	 */

	if (record__comp_enabled(aio->rec)) {
		size = zstd_compress(aio->rec->session, aio->data + aio->size,
				     perf_mmap__mmap_len(map) - aio->size,
				     buf, size);
	} else {
		memcpy(aio->data + aio->size, buf, size);
	}

	if (!aio->size) {
		/*
		 * Increment map->refcount to guard map->aio.data[] buffer
		 * from premature deallocation because map object can be
		 * released earlier than aio write request started on
		 * map->aio.data[] buffer is complete.
		 *
		 * perf_mmap__put() is done at record__aio_complete()
		 * after started aio request completion or at record__aio_push()
		 * if the request failed to start.
		 */
		perf_mmap__get(map);
	}

	aio->size += size;

	return size;
}
302
/*
 * Drain one mmap'ed ring buffer: stage its data via record__aio_pushfn()
 * and queue an asynchronous write to the trace file at offset *@off,
 * advancing *@off and the written-bytes accounting on success.
 * Returns 0 on success, > 0 when there was no data, < 0 on error.
 */
static int record__aio_push(struct record *rec, struct perf_mmap *map, off_t *off)
{
	int ret, idx;
	int trace_fd = rec->session->data->file.fd;
	struct record_aio aio = { .rec = rec, .size = 0 };

	/*
	 * Call record__aio_sync() to wait till map->aio.data[] buffer
	 * becomes available after previous aio write operation.
	 */

	idx = record__aio_sync(map, false);
	aio.data = map->aio.data[idx];
	ret = perf_mmap__push(map, &aio, record__aio_pushfn);
	if (ret != 0) /* ret > 0 - no data, ret < 0 - error */
		return ret;

	rec->samples++;
	ret = record__aio_write(&(map->aio.cblocks[idx]), trace_fd, aio.data, aio.size, *off);
	if (!ret) {
		*off += aio.size;
		rec->bytes_written += aio.size;
		if (switch_output_size(rec))
			trigger_hit(&switch_output_trigger);
	} else {
		/*
		 * Decrement map->refcount incremented in record__aio_pushfn()
		 * back if record__aio_write() operation failed to start, otherwise
		 * map->refcount is decremented in record__aio_complete() after
		 * aio write operation finishes successfully.
		 */
		perf_mmap__put(map);
	}

	return ret;
}
339
/* Report the current write offset of @trace_fd. */
static off_t record__aio_get_pos(int trace_fd)
{
	off_t pos = lseek(trace_fd, 0, SEEK_CUR);

	return pos;
}
344
/* Move the write offset of @trace_fd to the absolute position @pos. */
static void record__aio_set_pos(int trace_fd, off_t pos)
{
	(void)lseek(trace_fd, pos, SEEK_SET);
}
349
350static void record__aio_mmap_read_sync(struct record *rec)
351{
352 int i;
Jiri Olsa63503db2019-07-21 13:23:52 +0200353 struct evlist *evlist = rec->evlist;
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300354 struct perf_mmap *maps = evlist->mmap;
355
Alexey Budankovef781122019-03-18 20:44:12 +0300356 if (!record__aio_enabled(rec))
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300357 return;
358
359 for (i = 0; i < evlist->nr_mmaps; i++) {
360 struct perf_mmap *map = &maps[i];
361
362 if (map->base)
Alexey Budankov93f20c02018-11-06 12:07:19 +0300363 record__aio_sync(map, true);
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300364 }
365}
366
/* Default and maximum number of in-flight aio control blocks per mmap. */
static int nr_cblocks_default = 1;
static int nr_cblocks_max = 4;
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300369
370static int record__aio_parse(const struct option *opt,
Alexey Budankov93f20c02018-11-06 12:07:19 +0300371 const char *str,
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300372 int unset)
373{
374 struct record_opts *opts = (struct record_opts *)opt->value;
375
Alexey Budankov93f20c02018-11-06 12:07:19 +0300376 if (unset) {
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300377 opts->nr_cblocks = 0;
Alexey Budankov93f20c02018-11-06 12:07:19 +0300378 } else {
379 if (str)
380 opts->nr_cblocks = strtol(str, NULL, 0);
381 if (!opts->nr_cblocks)
382 opts->nr_cblocks = nr_cblocks_default;
383 }
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300384
385 return 0;
386}
387#else /* HAVE_AIO_SUPPORT */
static int nr_cblocks_max = 0; /* no aio support built in: no control blocks */
389
/* Stub when built without HAVE_AIO_SUPPORT: always fails so callers fall back. */
static int record__aio_push(struct record *rec __maybe_unused, struct perf_mmap *map __maybe_unused,
			    off_t *off __maybe_unused)
{
	return -1;
}
395
/* Stub when built without HAVE_AIO_SUPPORT. */
static off_t record__aio_get_pos(int trace_fd __maybe_unused)
{
	return -1;
}
400
/* Stub when built without HAVE_AIO_SUPPORT. */
static void record__aio_set_pos(int trace_fd __maybe_unused, off_t pos __maybe_unused)
{
}
404
/* Stub when built without HAVE_AIO_SUPPORT. */
static void record__aio_mmap_read_sync(struct record *rec __maybe_unused)
{
}
408#endif
409
410static int record__aio_enabled(struct record *rec)
411{
412 return rec->opts.nr_cblocks > 0;
413}
414
#define MMAP_FLUSH_DEFAULT 1
/*
 * Option callback for the mmap-flush threshold: accepts a size with an
 * optional B/K/M/G suffix or a plain number, defaults to
 * MMAP_FLUSH_DEFAULT when empty/zero, and clamps the result to a
 * quarter of the mmap buffer size.
 */
static int record__mmap_flush_parse(const struct option *opt,
				    const char *str,
				    int unset)
{
	int flush_max;
	struct record_opts *opts = (struct record_opts *)opt->value;
	static struct parse_tag tags[] = {
			{ .tag = 'B', .mult = 1       },
			{ .tag = 'K', .mult = 1 << 10 },
			{ .tag = 'M', .mult = 1 << 20 },
			{ .tag = 'G', .mult = 1 << 30 },
			{ .tag = 0 },
	};

	if (unset)
		return 0;

	if (str) {
		opts->mmap_flush = parse_tag_value(str, tags);
		/* parse_tag_value() failed: retry as a plain number */
		if (opts->mmap_flush == (int)-1)
			opts->mmap_flush = strtol(str, NULL, 0);
	}

	if (!opts->mmap_flush)
		opts->mmap_flush = MMAP_FLUSH_DEFAULT;

	/* never flush in chunks larger than a quarter of the ring buffer */
	flush_max = perf_evlist__mmap_size(opts->mmap_pages);
	flush_max /= 4;
	if (opts->mmap_flush > flush_max)
		opts->mmap_flush = flush_max;

	return 0;
}
449
#ifdef HAVE_ZSTD_SUPPORT
/* Default zstd compression level when the option is given without a value. */
static unsigned int comp_level_default = 1;

/*
 * Option callback for the compression level: 0 disables compression,
 * a missing/zero value selects comp_level_default.
 */
static int record__parse_comp_level(const struct option *opt, const char *str, int unset)
{
	struct record_opts *opts = opt->value;

	if (unset) {
		opts->comp_level = 0;
	} else {
		if (str)
			opts->comp_level = strtol(str, NULL, 0);
		if (!opts->comp_level)
			opts->comp_level = comp_level_default;
	}

	return 0;
}
#endif
static unsigned int comp_level_max = 22; /* upper bound accepted for the compression level */
470
Alexey Budankov42e1fd82019-03-18 20:41:33 +0300471static int record__comp_enabled(struct record *rec)
472{
473 return rec->opts.comp_level > 0;
474}
475
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -0200476static int process_synthesized_event(struct perf_tool *tool,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -0200477 union perf_event *event,
Irina Tirdea1d037ca2012-09-11 01:15:03 +0300478 struct perf_sample *sample __maybe_unused,
479 struct machine *machine __maybe_unused)
Arnaldo Carvalho de Melo234fbbf2009-10-26 19:23:18 -0200480{
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -0300481 struct record *rec = container_of(tool, struct record, tool);
Jiri Olsaded2b8f2018-09-13 14:54:06 +0200482 return record__write(rec, NULL, event, event->header.size);
Arnaldo Carvalho de Melo234fbbf2009-10-26 19:23:18 -0200483}
484
/*
 * perf_mmap__push() callback for the synchronous path: optionally
 * zstd-compress the chunk into the map's scratch buffer, then write it
 * to the output file. Returns record__write()'s result.
 */
static int record__pushfn(struct perf_mmap *map, void *to, void *bf, size_t size)
{
	struct record *rec = to;

	if (record__comp_enabled(rec)) {
		/* compressed payload lands in map->data, so write from there */
		size = zstd_compress(rec->session, map->data, perf_mmap__mmap_len(map), bf, size);
		bf = map->data;
	}

	rec->samples++;
	return record__write(rec, map, bf, size);
}
497
/* Set asynchronously by the signal handlers below. */
static volatile int done;		/* request main-loop exit */
static volatile int signr = -1;		/* signal to re-raise at exit, -1 = none */
static volatile int child_finished;	/* SIGCHLD was received */
Wang Nanc0bdc1c2016-04-13 08:21:06 +0000501
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300502static void sig_handler(int sig)
503{
504 if (sig == SIGCHLD)
505 child_finished = 1;
506 else
507 signr = sig;
508
509 done = 1;
510}
511
/* SIGSEGV handler: run perf-hook recovery, then dump a stack trace. */
static void sigsegv_handler(int sig)
{
	perf_hooks__recover();
	sighandler_dump_stack(sig);
}
517
/*
 * atexit-style cleanup: if we stopped because of a signal, restore its
 * default disposition and re-raise it so the exit status reflects it.
 */
static void record__sig_exit(void)
{
	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	raise(signr);
}
526
Adrian Huntere31f0d02015-04-30 17:37:27 +0300527#ifdef HAVE_AUXTRACE_SUPPORT
528
/*
 * Write one AUX area trace event plus its payload (possibly split in
 * two pieces by the ring-buffer wrap) to the output, padding the total
 * payload to an 8-byte boundary. For seekable single-file output the
 * event's file offset is also recorded in the auxtrace index.
 */
static int record__process_auxtrace(struct perf_tool *tool,
				    struct perf_mmap *map,
				    union perf_event *event, void *data1,
				    size_t len1, void *data2, size_t len2)
{
	struct record *rec = container_of(tool, struct record, tool);
	struct perf_data *data = &rec->data;
	size_t padding;
	u8 pad[8] = {0};

	if (!perf_data__is_pipe(data) && !perf_data__is_dir(data)) {
		off_t file_offset;
		int fd = perf_data__fd(data);
		int err;

		file_offset = lseek(fd, 0, SEEK_CUR);
		if (file_offset == -1)
			return -1;
		err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
						     event, file_offset);
		if (err)
			return err;
	}

	/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
	padding = (len1 + len2) & 7;
	if (padding)
		padding = 8 - padding;

	record__write(rec, map, event, event->header.size);
	record__write(rec, map, data1, len1);
	if (len2)
		record__write(rec, map, data2, len2);
	record__write(rec, map, &pad, padding);

	return 0;
}
566
567static int record__auxtrace_mmap_read(struct record *rec,
Jiri Olsae035f4c2018-09-13 14:54:05 +0200568 struct perf_mmap *map)
Adrian Hunteref149c22015-04-09 18:53:45 +0300569{
570 int ret;
571
Jiri Olsae035f4c2018-09-13 14:54:05 +0200572 ret = auxtrace_mmap__read(map, rec->itr, &rec->tool,
Adrian Hunteref149c22015-04-09 18:53:45 +0300573 record__process_auxtrace);
574 if (ret < 0)
575 return ret;
576
577 if (ret)
578 rec->samples++;
579
580 return 0;
581}
582
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300583static int record__auxtrace_mmap_read_snapshot(struct record *rec,
Jiri Olsae035f4c2018-09-13 14:54:05 +0200584 struct perf_mmap *map)
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300585{
586 int ret;
587
Jiri Olsae035f4c2018-09-13 14:54:05 +0200588 ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool,
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300589 record__process_auxtrace,
590 rec->opts.auxtrace_snapshot_size);
591 if (ret < 0)
592 return ret;
593
594 if (ret)
595 rec->samples++;
596
597 return 0;
598}
599
600static int record__auxtrace_read_snapshot_all(struct record *rec)
601{
602 int i;
603 int rc = 0;
604
605 for (i = 0; i < rec->evlist->nr_mmaps; i++) {
Jiri Olsae035f4c2018-09-13 14:54:05 +0200606 struct perf_mmap *map = &rec->evlist->mmap[i];
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300607
Jiri Olsae035f4c2018-09-13 14:54:05 +0200608 if (!map->auxtrace_mmap.base)
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300609 continue;
610
Jiri Olsae035f4c2018-09-13 14:54:05 +0200611 if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300612 rc = -1;
613 goto out;
614 }
615 }
616out:
617 return rc;
618}
619
Alexander Shishkince7b0e42019-08-06 17:41:01 +0300620static void record__read_auxtrace_snapshot(struct record *rec, bool on_exit)
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300621{
622 pr_debug("Recording AUX area tracing snapshot\n");
623 if (record__auxtrace_read_snapshot_all(rec) < 0) {
Wang Nan5f9cf592016-04-20 18:59:49 +0000624 trigger_error(&auxtrace_snapshot_trigger);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300625 } else {
Alexander Shishkince7b0e42019-08-06 17:41:01 +0300626 if (auxtrace_record__snapshot_finish(rec->itr, on_exit))
Wang Nan5f9cf592016-04-20 18:59:49 +0000627 trigger_error(&auxtrace_snapshot_trigger);
628 else
629 trigger_ready(&auxtrace_snapshot_trigger);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300630 }
631}
632
/*
 * Take a final AUX snapshot on exit. If no snapshot was in progress,
 * start one first. Returns 0 on success (or if the trigger already
 * failed earlier), -1 on error.
 */
static int record__auxtrace_snapshot_exit(struct record *rec)
{
	if (trigger_is_error(&auxtrace_snapshot_trigger))
		return 0;

	if (!auxtrace_record__snapshot_started &&
	    auxtrace_record__snapshot_start(rec->itr))
		return -1;

	record__read_auxtrace_snapshot(rec, true);
	if (trigger_is_error(&auxtrace_snapshot_trigger))
		return -1;

	return 0;
}
648
Adrian Hunter4b5ea3b2018-03-06 11:13:12 +0200649static int record__auxtrace_init(struct record *rec)
650{
651 int err;
652
653 if (!rec->itr) {
654 rec->itr = auxtrace_record__init(rec->evlist, &err);
655 if (err)
656 return err;
657 }
658
659 err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
660 rec->opts.auxtrace_snapshot_opts);
661 if (err)
662 return err;
663
664 return auxtrace_parse_filters(rec->evlist);
665}
666
Adrian Huntere31f0d02015-04-30 17:37:27 +0300667#else
668
/* Stub when built without HAVE_AUXTRACE_SUPPORT. */
static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
			       struct perf_mmap *map __maybe_unused)
{
	return 0;
}
675
/* Stub when built without HAVE_AUXTRACE_SUPPORT. */
static inline
void record__read_auxtrace_snapshot(struct record *rec __maybe_unused,
				    bool on_exit __maybe_unused)
{
}
681
/* Stub when built without HAVE_AUXTRACE_SUPPORT. */
static inline
int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
{
	return 0;
}
687
/* Stub when built without HAVE_AUXTRACE_SUPPORT. */
static inline
int record__auxtrace_snapshot_exit(struct record *rec __maybe_unused)
{
	return 0;
}
693
/* Stub when built without HAVE_AUXTRACE_SUPPORT. */
static int record__auxtrace_init(struct record *rec __maybe_unused)
{
	return 0;
}
698
Adrian Huntere31f0d02015-04-30 17:37:27 +0300699#endif
700
/*
 * Mmap the ring buffers for @evlist according to the record options
 * (buffer sizes, AUX area, aio cblocks, affinity, flush threshold and
 * compression level). Returns 0, or a negative errno-style error with
 * a user-facing diagnostic already printed.
 */
static int record__mmap_evlist(struct record *rec,
			       struct evlist *evlist)
{
	struct record_opts *opts = &rec->opts;
	char msg[512];

	/* NUMA-aware affinity modes need the cpu -> node map set up first */
	if (opts->affinity != PERF_AFFINITY_SYS)
		cpu__setup_cpunode_map();

	if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
				 opts->auxtrace_mmap_pages,
				 opts->auxtrace_snapshot_mode,
				 opts->nr_cblocks, opts->affinity,
				 opts->mmap_flush, opts->comp_level) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u,%u)\n",
			       opts->mmap_pages, opts->auxtrace_mmap_pages);
			return -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno,
				str_error_r(errno, msg, sizeof(msg)));
			if (errno)
				return -errno;
			else
				return -EINVAL;
		}
	}
	return 0;
}
734
735static int record__mmap(struct record *rec)
736{
737 return record__mmap_evlist(rec, rec->evlist);
738}
739
/*
 * Open every event in the evlist on the configured CPUs/threads,
 * retrying with fallbacks for unsupported configurations, then apply
 * the event filters and mmap the ring buffers. On success the session
 * takes ownership of the evlist. Returns 0 or a negative error.
 */
static int record__open(struct record *rec)
{
	char msg[BUFSIZ];
	struct evsel *pos;
	struct evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	int rc = 0;

	/*
	 * For initial_delay we need to add a dummy event so that we can track
	 * PERF_RECORD_MMAP while we wait for the initial delay to enable the
	 * real events, the ones asked by the user.
	 */
	if (opts->initial_delay) {
		if (perf_evlist__add_dummy(evlist))
			return -ENOMEM;

		/* the dummy (first) event tracks; the real events start on exec */
		pos = perf_evlist__first(evlist);
		pos->tracking = 0;
		pos = perf_evlist__last(evlist);
		pos->tracking = 1;
		pos->core.attr.enable_on_exec = 1;
	}

	perf_evlist__config(evlist, opts, &callchain_param);

	evlist__for_each_entry(evlist, pos) {
try_again:
		if (evsel__open(pos, pos->core.cpus, pos->core.threads) < 0) {
			/* first try a degraded-but-equivalent event configuration */
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose > 0)
					ui__warning("%s\n", msg);
				goto try_again;
			}
			/* weak group members may be retried outside their group */
			if ((errno == EINVAL || errno == EBADF) &&
			    pos->leader != pos &&
			    pos->weak_group) {
				pos = perf_evlist__reset_weak_group(evlist, pos);
				goto try_again;
			}
			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}

		pos->supported = true;
	}

	if (perf_evlist__apply_filters(evlist, &pos)) {
		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
			pos->filter, perf_evsel__name(pos), errno,
			str_error_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	rc = record__mmap(rec);
	if (rc)
		goto out;

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}
808
Namhyung Kime3d59112015-01-29 17:06:44 +0900809static int process_sample_event(struct perf_tool *tool,
810 union perf_event *event,
811 struct perf_sample *sample,
Jiri Olsa32dcd022019-07-21 13:23:51 +0200812 struct evsel *evsel,
Namhyung Kime3d59112015-01-29 17:06:44 +0900813 struct machine *machine)
814{
815 struct record *rec = container_of(tool, struct record, tool);
816
Jin Yao68588ba2017-12-08 21:13:42 +0800817 if (rec->evlist->first_sample_time == 0)
818 rec->evlist->first_sample_time = sample->time;
Namhyung Kime3d59112015-01-29 17:06:44 +0900819
Jin Yao68588ba2017-12-08 21:13:42 +0800820 rec->evlist->last_sample_time = sample->time;
821
822 if (rec->buildid_all)
823 return 0;
824
825 rec->samples++;
Namhyung Kime3d59112015-01-29 17:06:44 +0900826 return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
827}
828
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -0300829static int process_buildids(struct record *rec)
Arnaldo Carvalho de Melo6122e4e2010-02-03 16:52:05 -0200830{
Jiri Olsaf5fc14122013-10-15 16:27:32 +0200831 struct perf_session *session = rec->session;
Arnaldo Carvalho de Melo6122e4e2010-02-03 16:52:05 -0200832
Jiri Olsa45112e82019-02-21 10:41:29 +0100833 if (perf_data__size(&rec->data) == 0)
Arnaldo Carvalho de Melo9f591fd2010-03-11 15:53:11 -0300834 return 0;
835
Namhyung Kim00dc8652014-11-04 10:14:32 +0900836 /*
837 * During this process, it'll load kernel map and replace the
838 * dso->long_name to a real pathname it found. In this case
839 * we prefer the vmlinux path like
840 * /lib/modules/3.16.4/build/vmlinux
841 *
842 * rather than build-id path (in debug directory).
843 * $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
844 */
845 symbol_conf.ignore_vmlinux_buildid = true;
846
Namhyung Kim61566812016-01-11 22:37:09 +0900847 /*
848 * If --buildid-all is given, it marks all DSO regardless of hits,
Jin Yao68588ba2017-12-08 21:13:42 +0800849 * so no need to process samples. But if timestamp_boundary is enabled,
850 * it still needs to walk on all samples to get the timestamps of
851 * first/last samples.
Namhyung Kim61566812016-01-11 22:37:09 +0900852 */
Jin Yao68588ba2017-12-08 21:13:42 +0800853 if (rec->buildid_all && !rec->timestamp_boundary)
Namhyung Kim61566812016-01-11 22:37:09 +0900854 rec->tool.sample = NULL;
855
Arnaldo Carvalho de Melob7b61cb2015-03-03 11:58:45 -0300856 return perf_session__process_events(session);
Arnaldo Carvalho de Melo6122e4e2010-02-03 16:52:05 -0200857}
858
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -0200859static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800860{
861 int err;
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -0200862 struct perf_tool *tool = data;
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800863 /*
864 *As for guest kernel when processing subcommand record&report,
865 *we arrange module mmap prior to guest kernel mmap and trigger
866 *a preload dso because default guest module symbols are loaded
867 *from guest kallsyms instead of /lib/modules/XXX/XXX. This
868 *method is used to avoid symbol missing when the first addr is
869 *in module instead of in guest kernel.
870 */
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -0200871 err = perf_event__synthesize_modules(tool, process_synthesized_event,
Arnaldo Carvalho de Melo743eb862011-11-28 07:56:39 -0200872 machine);
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800873 if (err < 0)
874 pr_err("Couldn't record guest kernel [%d]'s reference"
Arnaldo Carvalho de Melo23346f22010-04-27 21:17:50 -0300875 " relocation symbol.\n", machine->pid);
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800876
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800877 /*
878 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
879 * have no _text sometimes.
880 */
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -0200881 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
Adrian Hunter0ae617b2014-01-29 16:14:40 +0200882 machine);
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800883 if (err < 0)
884 pr_err("Couldn't record guest kernel [%d]'s reference"
Arnaldo Carvalho de Melo23346f22010-04-27 21:17:50 -0300885 " relocation symbol.\n", machine->pid);
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800886}
887
/*
 * Header-only PERF_RECORD_FINISHED_ROUND event, written by
 * record__mmap_read_evlist() after a pass over the mmaps that produced at
 * least one event, so the consumer can flush/sort events in rounds.
 */
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};
892
Alexey Budankovf13de662019-01-22 20:50:57 +0300893static void record__adjust_affinity(struct record *rec, struct perf_mmap *map)
894{
895 if (rec->opts.affinity != PERF_AFFINITY_SYS &&
896 !CPU_EQUAL(&rec->affinity_mask, &map->affinity_mask)) {
897 CPU_ZERO(&rec->affinity_mask);
898 CPU_OR(&rec->affinity_mask, &rec->affinity_mask, &map->affinity_mask);
899 sched_setaffinity(0, sizeof(rec->affinity_mask), &rec->affinity_mask);
900 }
901}
902
Alexey Budankov5d7f4112019-03-18 20:43:35 +0300903static size_t process_comp_header(void *record, size_t increment)
904{
Jiri Olsa72932372019-08-28 15:57:16 +0200905 struct perf_record_compressed *event = record;
Alexey Budankov5d7f4112019-03-18 20:43:35 +0300906 size_t size = sizeof(*event);
907
908 if (increment) {
909 event->header.size += increment;
910 return increment;
911 }
912
913 event->header.type = PERF_RECORD_COMPRESSED;
914 event->header.size = size;
915
916 return size;
917}
918
919static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
920 void *src, size_t src_size)
921{
922 size_t compressed;
Jiri Olsa72932372019-08-28 15:57:16 +0200923 size_t max_record_size = PERF_SAMPLE_MAX_SIZE - sizeof(struct perf_record_compressed) - 1;
Alexey Budankov5d7f4112019-03-18 20:43:35 +0300924
925 compressed = zstd_compress_stream_to_records(&session->zstd_data, dst, dst_size, src, src_size,
926 max_record_size, process_comp_header);
927
928 session->bytes_transferred += src_size;
929 session->bytes_compressed += compressed;
930
931 return compressed;
932}
933
/*
 * Drain every mmap of @evlist into the output file.
 *
 * @overwrite: drain the overwrite (backward) mmaps instead of the regular
 *             ones; only done when they are in BKW_MMAP_DATA_PENDING state,
 *             and they are flipped back to BKW_MMAP_EMPTY afterwards.
 * @synch:     force a full flush by temporarily setting each map's flush
 *             threshold to 1 for the duration of the push (the original
 *             value is restored on both success and error paths).
 *
 * Returns 0 on success, -1 on any push/auxtrace failure.
 */
static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist,
				    bool overwrite, bool synch)
{
	u64 bytes_written = rec->bytes_written; /* to detect if anything was written */
	int i;
	int rc = 0;
	struct perf_mmap *maps;
	int trace_fd = rec->data.file.fd;
	off_t off = 0;

	if (!evlist)
		return 0;

	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
	if (!maps)
		return 0;

	/* Overwrite maps are only drained once data is pending. */
	if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
		return 0;

	/*
	 * With AIO the file offset is tracked manually in 'off' and written
	 * back at the end (or on error), since writes complete out of band.
	 */
	if (record__aio_enabled(rec))
		off = record__aio_get_pos(trace_fd);

	for (i = 0; i < evlist->nr_mmaps; i++) {
		u64 flush = 0;
		struct perf_mmap *map = &maps[i];

		if (map->base) {
			record__adjust_affinity(rec, map);
			if (synch) {
				/* Save and minimize the flush threshold. */
				flush = map->flush;
				map->flush = 1;
			}
			if (!record__aio_enabled(rec)) {
				if (perf_mmap__push(map, rec, record__pushfn) < 0) {
					if (synch)
						map->flush = flush;
					rc = -1;
					goto out;
				}
			} else {
				if (record__aio_push(rec, map, &off) < 0) {
					/* Persist the offset reached so far. */
					record__aio_set_pos(trace_fd, off);
					if (synch)
						map->flush = flush;
					rc = -1;
					goto out;
				}
			}
			if (synch)
				map->flush = flush;
		}

		if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
		    record__auxtrace_mmap_read(rec, map) != 0) {
			rc = -1;
			goto out;
		}
	}

	if (record__aio_enabled(rec))
		record__aio_set_pos(trace_fd, off);

	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, NULL, &finished_round_event, sizeof(finished_round_event));

	if (overwrite)
		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
out:
	return rc;
}
1009
Alexey Budankov470530b2019-03-18 20:40:26 +03001010static int record__mmap_read_all(struct record *rec, bool synch)
Wang Nancb216862016-06-27 10:24:04 +00001011{
1012 int err;
1013
Alexey Budankov470530b2019-03-18 20:40:26 +03001014 err = record__mmap_read_evlist(rec, rec->evlist, false, synch);
Wang Nancb216862016-06-27 10:24:04 +00001015 if (err)
1016 return err;
1017
Alexey Budankov470530b2019-03-18 20:40:26 +03001018 return record__mmap_read_evlist(rec, rec->evlist, true, synch);
Wang Nancb216862016-06-27 10:24:04 +00001019}
1020
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001021static void record__init_features(struct record *rec)
David Ahern57706ab2013-11-06 11:41:34 -07001022{
David Ahern57706ab2013-11-06 11:41:34 -07001023 struct perf_session *session = rec->session;
1024 int feat;
1025
1026 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
1027 perf_header__set_feat(&session->header, feat);
1028
1029 if (rec->no_buildid)
1030 perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
1031
Jiri Olsace9036a2019-07-21 13:24:23 +02001032 if (!have_tracepoints(&rec->evlist->core.entries))
David Ahern57706ab2013-11-06 11:41:34 -07001033 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
1034
1035 if (!rec->opts.branch_stack)
1036 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
Adrian Hunteref149c22015-04-09 18:53:45 +03001037
1038 if (!rec->opts.full_auxtrace)
1039 perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
Jiri Olsaffa517a2015-10-25 15:51:43 +01001040
Alexey Budankovcf790512018-10-09 17:36:24 +03001041 if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
1042 perf_header__clear_feat(&session->header, HEADER_CLOCKID);
1043
Jiri Olsa258031c2019-03-08 14:47:39 +01001044 perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
Alexey Budankov42e1fd82019-03-18 20:41:33 +03001045 if (!record__comp_enabled(rec))
1046 perf_header__clear_feat(&session->header, HEADER_COMPRESSED);
Jiri Olsa258031c2019-03-08 14:47:39 +01001047
Jiri Olsaffa517a2015-10-25 15:51:43 +01001048 perf_header__clear_feat(&session->header, HEADER_STAT);
David Ahern57706ab2013-11-06 11:41:34 -07001049}
1050
Wang Nane1ab48b2016-02-26 09:32:10 +00001051static void
1052record__finish_output(struct record *rec)
1053{
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001054 struct perf_data *data = &rec->data;
1055 int fd = perf_data__fd(data);
Wang Nane1ab48b2016-02-26 09:32:10 +00001056
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001057 if (data->is_pipe)
Wang Nane1ab48b2016-02-26 09:32:10 +00001058 return;
1059
1060 rec->session->header.data_size += rec->bytes_written;
Jiri Olsa45112e82019-02-21 10:41:29 +01001061 data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR);
Wang Nane1ab48b2016-02-26 09:32:10 +00001062
1063 if (!rec->no_buildid) {
1064 process_buildids(rec);
1065
1066 if (rec->buildid_all)
1067 dsos__hit_all(rec->session);
1068 }
1069 perf_session__write_header(rec->session, rec->evlist, fd, true);
1070
1071 return;
1072}
1073
Wang Nan4ea648a2016-07-14 08:34:47 +00001074static int record__synthesize_workload(struct record *rec, bool tail)
Wang Nanbe7b0c92016-04-20 18:59:54 +00001075{
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -03001076 int err;
Jiri Olsa9749b902019-07-21 13:23:50 +02001077 struct perf_thread_map *thread_map;
Wang Nanbe7b0c92016-04-20 18:59:54 +00001078
Wang Nan4ea648a2016-07-14 08:34:47 +00001079 if (rec->opts.tail_synthesize != tail)
1080 return 0;
1081
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -03001082 thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
1083 if (thread_map == NULL)
1084 return -1;
1085
1086 err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
Wang Nanbe7b0c92016-04-20 18:59:54 +00001087 process_synthesized_event,
1088 &rec->session->machines.host,
Mark Drayton3fcb10e2018-12-04 12:34:20 -08001089 rec->opts.sample_address);
Jiri Olsa7836e522019-07-21 13:24:20 +02001090 perf_thread_map__put(thread_map);
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -03001091 return err;
Wang Nanbe7b0c92016-04-20 18:59:54 +00001092}
1093
Wang Nan4ea648a2016-07-14 08:34:47 +00001094static int record__synthesize(struct record *rec, bool tail);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001095
/*
 * Rotate the output file (--switch-output): finish the current perf.data,
 * rename it with a timestamp suffix, and open a fresh output file.
 *
 * @at_exit: true when called from the final teardown path, in which case
 *           no new tracking events are synthesized for the next file.
 *
 * Returns the new output fd (>= 0) or a negative error.
 */
static int
record__switch_output(struct record *rec, bool at_exit)
{
	struct perf_data *data = &rec->data;
	int fd, err;
	char *new_filename;

	/* Same Size: "2015122520103046"*/
	char timestamp[] = "InvalidTimestamp";

	/* Wait for in-flight AIO writes before touching the file. */
	record__aio_mmap_read_sync(rec);

	/* Tail-mode synthesis for the file being closed. */
	record__synthesize(rec, true);
	if (target__none(&rec->opts.target))
		record__synthesize_workload(rec, true);

	rec->samples = 0;
	record__finish_output(rec);
	err = fetch_current_timestamp(timestamp, sizeof(timestamp));
	if (err) {
		pr_err("Failed to get current timestamp\n");
		return -EINVAL;
	}

	/* Rename current file to <path>.<timestamp> and reopen <path>. */
	fd = perf_data__switch(data, timestamp,
				    rec->session->header.data_offset,
				    at_exit, &new_filename);
	if (fd >= 0 && !at_exit) {
		/* Fresh file: restart the byte accounting. */
		rec->bytes_written = 0;
		rec->session->header.data_size = 0;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
			data->path, timestamp);

	/*
	 * --switch-max-files: keep a bounded ring of rotated files,
	 * deleting the oldest one before storing the new name.
	 */
	if (rec->switch_output.num_files) {
		int n = rec->switch_output.cur_file + 1;

		if (n >= rec->switch_output.num_files)
			n = 0;
		rec->switch_output.cur_file = n;
		if (rec->switch_output.filenames[n]) {
			remove(rec->switch_output.filenames[n]);
			zfree(&rec->switch_output.filenames[n]);
		}
		rec->switch_output.filenames[n] = new_filename;
	} else {
		free(new_filename);
	}

	/* Output tracking events */
	if (!at_exit) {
		record__synthesize(rec, false);

		/*
		 * In 'perf record --switch-output' without -a,
		 * record__synthesize() in record__switch_output() won't
		 * generate tracking events because there's no thread_map
		 * in evlist. Which causes newly created perf.data doesn't
		 * contain map and comm information.
		 * Create a fake thread_map and directly call
		 * perf_event__synthesize_thread_map() for those events.
		 */
		if (target__none(&rec->opts.target))
			record__synthesize_workload(rec, false);
	}
	return fd;
}
1165
/* errno of the workload's failed exec; written from the signal handler. */
static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	/* sival_int carries the child's errno (presumably queued by the
	 * evlist workload code -- see perf_evlist__prepare_workload). */
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}
1181
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001182static void snapshot_sig_handler(int sig);
Jiri Olsabfacbe32017-01-09 10:52:00 +01001183static void alarm_sig_handler(int sig);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001184
Wang Nanee667f92016-06-27 10:24:05 +00001185static const struct perf_event_mmap_page *
Jiri Olsa63503db2019-07-21 13:23:52 +02001186perf_evlist__pick_pc(struct evlist *evlist)
Wang Nanee667f92016-06-27 10:24:05 +00001187{
Wang Nanb2cb6152016-07-14 08:34:39 +00001188 if (evlist) {
1189 if (evlist->mmap && evlist->mmap[0].base)
1190 return evlist->mmap[0].base;
Wang Nan0b72d692017-12-04 16:51:07 +00001191 if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].base)
1192 return evlist->overwrite_mmap[0].base;
Wang Nanb2cb6152016-07-14 08:34:39 +00001193 }
Wang Nanee667f92016-06-27 10:24:05 +00001194 return NULL;
1195}
1196
Wang Nanc45628b2016-05-24 02:28:59 +00001197static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
1198{
Wang Nanee667f92016-06-27 10:24:05 +00001199 const struct perf_event_mmap_page *pc;
1200
1201 pc = perf_evlist__pick_pc(rec->evlist);
1202 if (pc)
1203 return pc;
Wang Nanc45628b2016-05-24 02:28:59 +00001204 return NULL;
1205}
1206
/*
 * Synthesize all the non-sample events a consumer needs to make sense of
 * the recording: attrs/features/tracing-data (pipe mode only), time
 * conversion, auxtrace info, kernel and module mmaps, guest machines,
 * extra attrs, thread/cpu maps, BPF events and the existing threads.
 *
 * @tail: only run when it matches the configured tail_synthesize mode, so
 *        this can be called at both start and tail unconditionally.
 *
 * Returns 0 on success, negative error otherwise (a BPF synthesis failure
 * is only warned about).
 */
static int record__synthesize(struct record *rec, bool tail)
{
	struct perf_session *session = rec->session;
	struct machine *machine = &session->machines.host;
	struct perf_data *data = &rec->data;
	struct record_opts *opts = &rec->opts;
	struct perf_tool *tool = &rec->tool;
	int fd = perf_data__fd(data);
	int err = 0;

	if (rec->opts.tail_synthesize != tail)
		return 0;

	if (data->is_pipe) {
		/*
		 * We need to synthesize events first, because some
		 * features works on top of them (on report side).
		 */
		err = perf_event__synthesize_attrs(tool, rec->evlist,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out;
		}

		err = perf_event__synthesize_features(tool, session, rec->evlist,
						      process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize features.\n");
			return err;
		}

		if (have_tracepoints(&rec->evlist->core.entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so its not really
			 * an error, just that we don't need to
			 * synthesize anything. We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out;
			}
			/* Tracing data went straight to the output file. */
			rec->bytes_written += err;
		}
	}

	err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
					  process_synthesized_event, machine);
	if (err)
		goto out;

	if (rec->opts.full_auxtrace) {
		err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
					session, process_synthesized_event);
		if (err)
			goto out;
	}

	/* Kernel/module mmaps are pointless when only userspace is sampled. */
	if (!perf_evlist__exclude_kernel(rec->evlist)) {
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine);
		WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
				   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
				   "Check /proc/kallsyms permission or run as root.\n");

		err = perf_event__synthesize_modules(tool, process_synthesized_event,
						     machine);
		WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
				   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
				   "Check /proc/modules permission or run as root.\n");
	}

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	err = perf_event__synthesize_extra_attr(&rec->tool,
						rec->evlist,
						process_synthesized_event,
						data->is_pipe);
	if (err)
		goto out;

	err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->core.threads,
						 process_synthesized_event,
						NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize thread map.\n");
		return err;
	}

	err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.cpus,
					     process_synthesized_event, NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize cpu map.\n");
		return err;
	}

	err = perf_event__synthesize_bpf_events(session, process_synthesized_event,
						machine, opts);
	if (err < 0)
		pr_warning("Couldn't synthesize bpf events.\n");

	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->core.threads,
					    process_synthesized_event, opts->sample_address,
					    1);
out:
	return err;
}
1322
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001323static int __cmd_record(struct record *rec, int argc, const char **argv)
Peter Zijlstra16c8a102009-05-05 17:50:27 +02001324{
David Ahern57706ab2013-11-06 11:41:34 -07001325 int err;
Namhyung Kim45604712014-05-12 09:47:24 +09001326 int status = 0;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001327 unsigned long waking = 0;
Zhang, Yanmin46be6042010-03-18 11:36:04 -03001328 const bool forks = argc > 0;
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02001329 struct perf_tool *tool = &rec->tool;
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03001330 struct record_opts *opts = &rec->opts;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001331 struct perf_data *data = &rec->data;
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001332 struct perf_session *session;
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001333 bool disabled = false, draining = false;
Jiri Olsa63503db2019-07-21 13:23:52 +02001334 struct evlist *sb_evlist = NULL;
Namhyung Kim42aa2762015-01-29 17:06:48 +09001335 int fd;
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001336 float ratio = 0;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001337
Namhyung Kim45604712014-05-12 09:47:24 +09001338 atexit(record__sig_exit);
Peter Zijlstraf5970552009-06-18 23:22:55 +02001339 signal(SIGCHLD, sig_handler);
1340 signal(SIGINT, sig_handler);
David Ahern804f7ac2013-05-06 12:24:23 -06001341 signal(SIGTERM, sig_handler);
Wang Nana0748652016-11-26 07:03:28 +00001342 signal(SIGSEGV, sigsegv_handler);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00001343
Hari Bathinif3b36142017-03-08 02:11:43 +05301344 if (rec->opts.record_namespaces)
1345 tool->namespace_events = true;
1346
Jiri Olsadc0c6122017-01-09 10:51:58 +01001347 if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001348 signal(SIGUSR2, snapshot_sig_handler);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001349 if (rec->opts.auxtrace_snapshot_mode)
1350 trigger_on(&auxtrace_snapshot_trigger);
Jiri Olsadc0c6122017-01-09 10:51:58 +01001351 if (rec->switch_output.enabled)
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001352 trigger_on(&switch_output_trigger);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00001353 } else {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001354 signal(SIGUSR2, SIG_IGN);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00001355 }
Peter Zijlstraf5970552009-06-18 23:22:55 +02001356
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001357 session = perf_session__new(data, false, tool);
Mamatha Inamdar6ef81c52019-08-22 12:50:49 +05301358 if (IS_ERR(session)) {
Adrien BAKffa91882014-04-18 11:00:43 +09001359 pr_err("Perf session creation failed.\n");
Mamatha Inamdar6ef81c52019-08-22 12:50:49 +05301360 return PTR_ERR(session);
Arnaldo Carvalho de Meloa9a70bb2009-11-17 01:18:11 -02001361 }
1362
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001363 fd = perf_data__fd(data);
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001364 rec->session = session;
1365
Alexey Budankov5d7f4112019-03-18 20:43:35 +03001366 if (zstd_init(&session->zstd_data, rec->opts.comp_level) < 0) {
1367 pr_err("Compression initialization failed.\n");
1368 return -1;
1369 }
1370
1371 session->header.env.comp_type = PERF_COMP_ZSTD;
1372 session->header.env.comp_level = rec->opts.comp_level;
1373
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001374 record__init_features(rec);
Stephane Eranian330aa672012-03-08 23:47:46 +01001375
Alexey Budankovcf790512018-10-09 17:36:24 +03001376 if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
1377 session->header.env.clockid_res_ns = rec->opts.clockid_res_ns;
1378
Arnaldo Carvalho de Melod4db3f12009-12-27 21:36:57 -02001379 if (forks) {
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001380 err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001381 argv, data->is_pipe,
Arnaldo Carvalho de Melo735f7e02014-01-03 14:56:49 -03001382 workload_exec_failed_signal);
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001383 if (err < 0) {
1384 pr_err("Couldn't run the workload!\n");
Namhyung Kim45604712014-05-12 09:47:24 +09001385 status = err;
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001386 goto out_delete_session;
Jens Axboe0a5ac842009-08-12 11:18:01 +02001387 }
Peter Zijlstra856e9662009-12-16 17:55:55 +01001388 }
1389
Jiri Olsaad46e48c2018-03-02 17:13:54 +01001390 /*
1391 * If we have just single event and are sending data
1392 * through pipe, we need to force the ids allocation,
1393 * because we synthesize event name through the pipe
1394 * and need the id for that.
1395 */
Jiri Olsa6484d2f2019-07-21 13:24:28 +02001396 if (data->is_pipe && rec->evlist->core.nr_entries == 1)
Jiri Olsaad46e48c2018-03-02 17:13:54 +01001397 rec->opts.sample_id = true;
1398
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001399 if (record__open(rec) != 0) {
David Ahern8d3eca22012-08-26 12:24:47 -06001400 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001401 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001402 }
Alexey Budankov42e1fd82019-03-18 20:41:33 +03001403 session->header.env.comp_mmap_len = session->evlist->mmap_len;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001404
Wang Nan8690a2a2016-02-22 09:10:32 +00001405 err = bpf__apply_obj_config();
1406 if (err) {
1407 char errbuf[BUFSIZ];
1408
1409 bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
1410 pr_err("ERROR: Apply config to BPF failed: %s\n",
1411 errbuf);
1412 goto out_child;
1413 }
1414
Adrian Huntercca84822015-08-19 17:29:21 +03001415 /*
1416 * Normally perf_session__new would do this, but it doesn't have the
1417 * evlist.
1418 */
1419 if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
1420 pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
1421 rec->tool.ordered_events = false;
1422 }
1423
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001424 if (!rec->evlist->nr_groups)
Namhyung Kima8bb5592013-01-22 18:09:31 +09001425 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
1426
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001427 if (data->is_pipe) {
Namhyung Kim42aa2762015-01-29 17:06:48 +09001428 err = perf_header__write_pipe(fd);
Tom Zanussi529870e2010-04-01 23:59:16 -05001429 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001430 goto out_child;
Jiri Olsa563aecb2013-06-05 13:35:06 +02001431 } else {
Namhyung Kim42aa2762015-01-29 17:06:48 +09001432 err = perf_session__write_header(session, rec->evlist, fd, false);
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02001433 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001434 goto out_child;
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02001435 }
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02001436
David Ahernd3665492012-02-06 15:27:52 -07001437 if (!rec->no_buildid
Robert Richtere20960c2011-12-07 10:02:55 +01001438 && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
David Ahernd3665492012-02-06 15:27:52 -07001439 pr_err("Couldn't generate buildids. "
Robert Richtere20960c2011-12-07 10:02:55 +01001440 "Use --no-buildid to profile anyway.\n");
David Ahern8d3eca22012-08-26 12:24:47 -06001441 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001442 goto out_child;
Robert Richtere20960c2011-12-07 10:02:55 +01001443 }
1444
Song Liud56354d2019-03-11 22:30:51 -07001445 if (!opts->no_bpf_event)
1446 bpf_event__add_sb_event(&sb_evlist, &session->header.env);
1447
Song Liu657ee552019-03-11 22:30:50 -07001448 if (perf_evlist__start_sb_thread(sb_evlist, &rec->opts.target)) {
1449 pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
1450 opts->no_bpf_event = true;
1451 }
1452
Wang Nan4ea648a2016-07-14 08:34:47 +00001453 err = record__synthesize(rec, false);
Wang Nanc45c86e2016-02-26 09:32:07 +00001454 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001455 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001456
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001457 if (rec->realtime_prio) {
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001458 struct sched_param param;
1459
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001460 param.sched_priority = rec->realtime_prio;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001461 if (sched_setscheduler(0, SCHED_FIFO, &param)) {
Arnaldo Carvalho de Melo6beba7a2009-10-21 17:34:06 -02001462 pr_err("Could not set realtime priority.\n");
David Ahern8d3eca22012-08-26 12:24:47 -06001463 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001464 goto out_child;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001465 }
1466 }
1467
Jiri Olsa774cb492012-11-12 18:34:01 +01001468 /*
1469 * When perf is starting the traced process, all the events
1470 * (apart from group members) have enable_on_exec=1 set,
1471 * so don't spoil it by prematurely enabling them.
1472 */
Andi Kleen6619a532014-01-11 13:38:27 -08001473 if (!target__none(&opts->target) && !opts->initial_delay)
Jiri Olsa1c87f162019-07-21 13:24:08 +02001474 evlist__enable(rec->evlist);
David Ahern764e16a32011-08-25 10:17:55 -06001475
Peter Zijlstra856e9662009-12-16 17:55:55 +01001476 /*
1477 * Let the child rip
1478 */
Namhyung Kime803cf92015-09-22 09:24:55 +09001479 if (forks) {
Jiri Olsa20a8a3c2018-03-07 16:50:04 +01001480 struct machine *machine = &session->machines.host;
Namhyung Kime5bed562015-09-30 10:45:24 +09001481 union perf_event *event;
Hari Bathinie907caf2017-03-08 02:11:51 +05301482 pid_t tgid;
Namhyung Kime5bed562015-09-30 10:45:24 +09001483
1484 event = malloc(sizeof(event->comm) + machine->id_hdr_size);
1485 if (event == NULL) {
1486 err = -ENOMEM;
1487 goto out_child;
1488 }
1489
Namhyung Kime803cf92015-09-22 09:24:55 +09001490 /*
1491 * Some H/W events are generated before COMM event
1492 * which is emitted during exec(), so perf script
1493 * cannot see a correct process name for those events.
1494 * Synthesize COMM event to prevent it.
1495 */
Hari Bathinie907caf2017-03-08 02:11:51 +05301496 tgid = perf_event__synthesize_comm(tool, event,
1497 rec->evlist->workload.pid,
1498 process_synthesized_event,
1499 machine);
1500 free(event);
1501
1502 if (tgid == -1)
1503 goto out_child;
1504
1505 event = malloc(sizeof(event->namespaces) +
1506 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
1507 machine->id_hdr_size);
1508 if (event == NULL) {
1509 err = -ENOMEM;
1510 goto out_child;
1511 }
1512
1513 /*
1514 * Synthesize NAMESPACES event for the command specified.
1515 */
1516 perf_event__synthesize_namespaces(tool, event,
1517 rec->evlist->workload.pid,
1518 tgid, process_synthesized_event,
1519 machine);
Namhyung Kime5bed562015-09-30 10:45:24 +09001520 free(event);
Namhyung Kime803cf92015-09-22 09:24:55 +09001521
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001522 perf_evlist__start_workload(rec->evlist);
Namhyung Kime803cf92015-09-22 09:24:55 +09001523 }
Peter Zijlstra856e9662009-12-16 17:55:55 +01001524
Andi Kleen6619a532014-01-11 13:38:27 -08001525 if (opts->initial_delay) {
Arnaldo Carvalho de Melo0693e682016-08-08 15:05:46 -03001526 usleep(opts->initial_delay * USEC_PER_MSEC);
Jiri Olsa1c87f162019-07-21 13:24:08 +02001527 evlist__enable(rec->evlist);
Andi Kleen6619a532014-01-11 13:38:27 -08001528 }
1529
Wang Nan5f9cf592016-04-20 18:59:49 +00001530 trigger_ready(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001531 trigger_ready(&switch_output_trigger);
Wang Nana0748652016-11-26 07:03:28 +00001532 perf_hooks__invoke_record_start();
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001533 for (;;) {
Yang Shi9f065192015-09-29 14:49:43 -07001534 unsigned long long hits = rec->samples;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001535
Wang Nan057374642016-07-14 08:34:43 +00001536 /*
1537 * rec->evlist->bkw_mmap_state is possible to be
1538 * BKW_MMAP_EMPTY here: when done == true and
1539 * hits != rec->samples in previous round.
1540 *
1541 * perf_evlist__toggle_bkw_mmap ensure we never
1542 * convert BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
1543 */
1544 if (trigger_is_hit(&switch_output_trigger) || done || draining)
1545 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);
1546
Alexey Budankov470530b2019-03-18 20:40:26 +03001547 if (record__mmap_read_all(rec, false) < 0) {
Wang Nan5f9cf592016-04-20 18:59:49 +00001548 trigger_error(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001549 trigger_error(&switch_output_trigger);
David Ahern8d3eca22012-08-26 12:24:47 -06001550 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001551 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001552 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001553
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001554 if (auxtrace_record__snapshot_started) {
1555 auxtrace_record__snapshot_started = 0;
Wang Nan5f9cf592016-04-20 18:59:49 +00001556 if (!trigger_is_error(&auxtrace_snapshot_trigger))
Alexander Shishkince7b0e42019-08-06 17:41:01 +03001557 record__read_auxtrace_snapshot(rec, false);
Wang Nan5f9cf592016-04-20 18:59:49 +00001558 if (trigger_is_error(&auxtrace_snapshot_trigger)) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001559 pr_err("AUX area tracing snapshot failed\n");
1560 err = -1;
1561 goto out_child;
1562 }
1563 }
1564
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001565 if (trigger_is_hit(&switch_output_trigger)) {
Wang Nan057374642016-07-14 08:34:43 +00001566 /*
1567 * If switch_output_trigger is hit, the data in
1568 * overwritable ring buffer should have been collected,
1569 * so bkw_mmap_state should be set to BKW_MMAP_EMPTY.
1570 *
1571 * If SIGUSR2 raise after or during record__mmap_read_all(),
1572 * record__mmap_read_all() didn't collect data from
1573 * overwritable ring buffer. Read again.
1574 */
1575 if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
1576 continue;
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001577 trigger_ready(&switch_output_trigger);
1578
Wang Nan057374642016-07-14 08:34:43 +00001579 /*
1580 * Reenable events in overwrite ring buffer after
1581 * record__mmap_read_all(): we should have collected
1582 * data from it.
1583 */
1584 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);
1585
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001586 if (!quiet)
1587 fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
1588 waking);
1589 waking = 0;
1590 fd = record__switch_output(rec, false);
1591 if (fd < 0) {
1592 pr_err("Failed to switch to new file\n");
1593 trigger_error(&switch_output_trigger);
1594 err = fd;
1595 goto out_child;
1596 }
Jiri Olsabfacbe32017-01-09 10:52:00 +01001597
1598 /* re-arm the alarm */
1599 if (rec->switch_output.time)
1600 alarm(rec->switch_output.time);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001601 }
1602
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001603 if (hits == rec->samples) {
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001604 if (done || draining)
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001605 break;
Arnaldo Carvalho de Melof66a8892014-08-18 17:25:59 -03001606 err = perf_evlist__poll(rec->evlist, -1);
Jiri Olsaa5151142014-06-02 13:44:23 -04001607 /*
1608 * Propagate error, only if there's any. Ignore positive
1609 * number of returned events and interrupt error.
1610 */
1611 if (err > 0 || (err < 0 && errno == EINTR))
Namhyung Kim45604712014-05-12 09:47:24 +09001612 err = 0;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001613 waking++;
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001614
1615 if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
1616 draining = true;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001617 }
1618
Jiri Olsa774cb492012-11-12 18:34:01 +01001619 /*
1620 * When perf is starting the traced process, at the end events
1621 * die with the process and we wait for that. Thus no need to
1622 * disable events in this case.
1623 */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001624 if (done && !disabled && !target__none(&opts->target)) {
Wang Nan5f9cf592016-04-20 18:59:49 +00001625 trigger_off(&auxtrace_snapshot_trigger);
Jiri Olsae74676d2019-07-21 13:24:09 +02001626 evlist__disable(rec->evlist);
Jiri Olsa27119262012-11-12 18:34:02 +01001627 disabled = true;
1628 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001629 }
Alexander Shishkince7b0e42019-08-06 17:41:01 +03001630
Wang Nan5f9cf592016-04-20 18:59:49 +00001631 trigger_off(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001632 trigger_off(&switch_output_trigger);
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001633
Alexander Shishkince7b0e42019-08-06 17:41:01 +03001634 if (opts->auxtrace_snapshot_on_exit)
1635 record__auxtrace_snapshot_exit(rec);
1636
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001637 if (forks && workload_exec_errno) {
Masami Hiramatsu35550da2014-08-14 02:22:43 +00001638 char msg[STRERR_BUFSIZE];
Arnaldo Carvalho de Meloc8b5f2c2016-07-06 11:56:20 -03001639 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001640 pr_err("Workload failed: %s\n", emsg);
1641 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001642 goto out_child;
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001643 }
1644
Namhyung Kime3d59112015-01-29 17:06:44 +09001645 if (!quiet)
Namhyung Kim45604712014-05-12 09:47:24 +09001646 fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02001647
Wang Nan4ea648a2016-07-14 08:34:47 +00001648 if (target__none(&rec->opts.target))
1649 record__synthesize_workload(rec, true);
1650
Namhyung Kim45604712014-05-12 09:47:24 +09001651out_child:
Alexey Budankov470530b2019-03-18 20:40:26 +03001652 record__mmap_read_all(rec, true);
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001653 record__aio_mmap_read_sync(rec);
1654
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001655 if (rec->session->bytes_transferred && rec->session->bytes_compressed) {
1656 ratio = (float)rec->session->bytes_transferred/(float)rec->session->bytes_compressed;
1657 session->header.env.comp_ratio = ratio + 0.5;
1658 }
1659
Namhyung Kim45604712014-05-12 09:47:24 +09001660 if (forks) {
1661 int exit_status;
Ingo Molnaraddc2782009-06-02 23:43:11 +02001662
Namhyung Kim45604712014-05-12 09:47:24 +09001663 if (!child_finished)
1664 kill(rec->evlist->workload.pid, SIGTERM);
1665
1666 wait(&exit_status);
1667
1668 if (err < 0)
1669 status = err;
1670 else if (WIFEXITED(exit_status))
1671 status = WEXITSTATUS(exit_status);
1672 else if (WIFSIGNALED(exit_status))
1673 signr = WTERMSIG(exit_status);
1674 } else
1675 status = err;
1676
Wang Nan4ea648a2016-07-14 08:34:47 +00001677 record__synthesize(rec, true);
Namhyung Kime3d59112015-01-29 17:06:44 +09001678 /* this will be recalculated during process_buildids() */
1679 rec->samples = 0;
1680
Wang Nanecfd7a92016-04-13 08:21:07 +00001681 if (!err) {
1682 if (!rec->timestamp_filename) {
1683 record__finish_output(rec);
1684 } else {
1685 fd = record__switch_output(rec, true);
1686 if (fd < 0) {
1687 status = fd;
1688 goto out_delete_session;
1689 }
1690 }
1691 }
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001692
Wang Nana0748652016-11-26 07:03:28 +00001693 perf_hooks__invoke_record_end();
1694
Namhyung Kime3d59112015-01-29 17:06:44 +09001695 if (!err && !quiet) {
1696 char samples[128];
Wang Nanecfd7a92016-04-13 08:21:07 +00001697 const char *postfix = rec->timestamp_filename ?
1698 ".<timestamp>" : "";
Namhyung Kime3d59112015-01-29 17:06:44 +09001699
Adrian Hunteref149c22015-04-09 18:53:45 +03001700 if (rec->samples && !rec->opts.full_auxtrace)
Namhyung Kime3d59112015-01-29 17:06:44 +09001701 scnprintf(samples, sizeof(samples),
1702 " (%" PRIu64 " samples)", rec->samples);
1703 else
1704 samples[0] = '\0';
1705
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001706 fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s",
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001707 perf_data__size(data) / 1024.0 / 1024.0,
Jiri Olsa2d4f2792019-02-21 10:41:30 +01001708 data->path, postfix, samples);
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001709 if (ratio) {
1710 fprintf(stderr, ", compressed (original %.3f MB, ratio is %.3f)",
1711 rec->session->bytes_transferred / 1024.0 / 1024.0,
1712 ratio);
1713 }
1714 fprintf(stderr, " ]\n");
Namhyung Kime3d59112015-01-29 17:06:44 +09001715 }
1716
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001717out_delete_session:
Alexey Budankov5d7f4112019-03-18 20:43:35 +03001718 zstd_fini(&session->zstd_data);
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001719 perf_session__delete(session);
Song Liu657ee552019-03-11 22:30:50 -07001720
1721 if (!opts->no_bpf_event)
1722 perf_evlist__stop_sb_thread(sb_evlist);
Namhyung Kim45604712014-05-12 09:47:24 +09001723 return status;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001724}
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001725
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001726static void callchain_debug(struct callchain_param *callchain)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001727{
Kan Liangaad2b212015-01-05 13:23:04 -05001728 static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
Jiri Olsaa601fdf2014-02-03 12:44:43 +01001729
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001730 pr_debug("callchain: type %s\n", str[callchain->record_mode]);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001731
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001732 if (callchain->record_mode == CALLCHAIN_DWARF)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001733 pr_debug("callchain: stack dump size %d\n",
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001734 callchain->dump_size);
1735}
1736
1737int record_opts__parse_callchain(struct record_opts *record,
1738 struct callchain_param *callchain,
1739 const char *arg, bool unset)
1740{
1741 int ret;
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001742 callchain->enabled = !unset;
1743
1744 /* --no-call-graph */
1745 if (unset) {
1746 callchain->record_mode = CALLCHAIN_NONE;
1747 pr_debug("callchain: disabled\n");
1748 return 0;
1749 }
1750
1751 ret = parse_callchain_record_opt(arg, callchain);
1752 if (!ret) {
1753 /* Enable data address sampling for DWARF unwind. */
1754 if (callchain->record_mode == CALLCHAIN_DWARF)
1755 record->sample_address = true;
1756 callchain_debug(callchain);
1757 }
1758
1759 return ret;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001760}
1761
Kan Liangc421e802015-07-29 05:42:12 -04001762int record_parse_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001763 const char *arg,
1764 int unset)
1765{
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001766 return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
Jiri Olsa26d33022012-08-07 15:20:47 +02001767}
1768
Kan Liangc421e802015-07-29 05:42:12 -04001769int record_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001770 const char *arg __maybe_unused,
1771 int unset __maybe_unused)
1772{
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001773 struct callchain_param *callchain = opt->value;
Kan Liangc421e802015-07-29 05:42:12 -04001774
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001775 callchain->enabled = true;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001776
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001777 if (callchain->record_mode == CALLCHAIN_NONE)
1778 callchain->record_mode = CALLCHAIN_FP;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001779
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001780 callchain_debug(callchain);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001781 return 0;
1782}
1783
Jiri Olsaeb853e82014-02-03 12:44:42 +01001784static int perf_record_config(const char *var, const char *value, void *cb)
1785{
Namhyung Kim7a29c082015-12-15 10:49:56 +09001786 struct record *rec = cb;
1787
1788 if (!strcmp(var, "record.build-id")) {
1789 if (!strcmp(value, "cache"))
1790 rec->no_buildid_cache = false;
1791 else if (!strcmp(value, "no-cache"))
1792 rec->no_buildid_cache = true;
1793 else if (!strcmp(value, "skip"))
1794 rec->no_buildid = true;
1795 else
1796 return -1;
1797 return 0;
1798 }
Yisheng Xiecff17202018-03-12 19:25:57 +08001799 if (!strcmp(var, "record.call-graph")) {
1800 var = "call-graph.record-mode";
1801 return perf_default_config(var, value, cb);
1802 }
Alexey Budankov93f20c02018-11-06 12:07:19 +03001803#ifdef HAVE_AIO_SUPPORT
1804 if (!strcmp(var, "record.aio")) {
1805 rec->opts.nr_cblocks = strtol(value, NULL, 0);
1806 if (!rec->opts.nr_cblocks)
1807 rec->opts.nr_cblocks = nr_cblocks_default;
1808 }
1809#endif
Jiri Olsaeb853e82014-02-03 12:44:42 +01001810
Yisheng Xiecff17202018-03-12 19:25:57 +08001811 return 0;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001812}
1813
/* One entry of the --clockid name -> clockid_t lookup table. */
struct clockid_map {
	const char *name;
	int clockid;
};

#define CLOCKID_MAP(n, c)	\
	{ .name = n, .clockid = (c), }

/* Sentinel terminating the clockids[] table. */
#define CLOCKID_END	{ .name = NULL, }


/*
 * Add the missing ones, we need to build on many distros...
 */
#ifndef CLOCK_MONOTONIC_RAW
#define CLOCK_MONOTONIC_RAW 4
#endif
#ifndef CLOCK_BOOTTIME
#define CLOCK_BOOTTIME 7
#endif
#ifndef CLOCK_TAI
#define CLOCK_TAI 11
#endif

/* Symbolic names accepted by --clockid; searched linearly by parse_clockid(). */
static const struct clockid_map clockids[] = {
	/* available for all events, NMI safe */
	CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
	CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),

	/* available for some events */
	CLOCKID_MAP("realtime", CLOCK_REALTIME),
	CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
	CLOCKID_MAP("tai", CLOCK_TAI),

	/* available for the lazy */
	CLOCKID_MAP("mono", CLOCK_MONOTONIC),
	CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
	CLOCKID_MAP("real", CLOCK_REALTIME),
	CLOCKID_MAP("boot", CLOCK_BOOTTIME),

	CLOCKID_END,
};
1856
Alexey Budankovcf790512018-10-09 17:36:24 +03001857static int get_clockid_res(clockid_t clk_id, u64 *res_ns)
1858{
1859 struct timespec res;
1860
1861 *res_ns = 0;
1862 if (!clock_getres(clk_id, &res))
1863 *res_ns = res.tv_nsec + res.tv_sec * NSEC_PER_SEC;
1864 else
1865 pr_warning("WARNING: Failed to determine specified clock resolution.\n");
1866
1867 return 0;
1868}
1869
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001870static int parse_clockid(const struct option *opt, const char *str, int unset)
1871{
1872 struct record_opts *opts = (struct record_opts *)opt->value;
1873 const struct clockid_map *cm;
1874 const char *ostr = str;
1875
1876 if (unset) {
1877 opts->use_clockid = 0;
1878 return 0;
1879 }
1880
1881 /* no arg passed */
1882 if (!str)
1883 return 0;
1884
1885 /* no setting it twice */
1886 if (opts->use_clockid)
1887 return -1;
1888
1889 opts->use_clockid = true;
1890
1891 /* if its a number, we're done */
1892 if (sscanf(str, "%d", &opts->clockid) == 1)
Alexey Budankovcf790512018-10-09 17:36:24 +03001893 return get_clockid_res(opts->clockid, &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001894
1895 /* allow a "CLOCK_" prefix to the name */
1896 if (!strncasecmp(str, "CLOCK_", 6))
1897 str += 6;
1898
1899 for (cm = clockids; cm->name; cm++) {
1900 if (!strcasecmp(str, cm->name)) {
1901 opts->clockid = cm->clockid;
Alexey Budankovcf790512018-10-09 17:36:24 +03001902 return get_clockid_res(opts->clockid,
1903 &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001904 }
1905 }
1906
1907 opts->use_clockid = false;
1908 ui__warning("unknown clockid %s, check man page\n", ostr);
1909 return -1;
1910}
1911
Alexey Budankovf4fe11b2019-01-22 20:52:03 +03001912static int record__parse_affinity(const struct option *opt, const char *str, int unset)
1913{
1914 struct record_opts *opts = (struct record_opts *)opt->value;
1915
1916 if (unset || !str)
1917 return 0;
1918
1919 if (!strcasecmp(str, "node"))
1920 opts->affinity = PERF_AFFINITY_NODE;
1921 else if (!strcasecmp(str, "cpu"))
1922 opts->affinity = PERF_AFFINITY_CPU;
1923
1924 return 0;
1925}
1926
Adrian Huntere9db1312015-04-09 18:53:46 +03001927static int record__parse_mmap_pages(const struct option *opt,
1928 const char *str,
1929 int unset __maybe_unused)
1930{
1931 struct record_opts *opts = opt->value;
1932 char *s, *p;
1933 unsigned int mmap_pages;
1934 int ret;
1935
1936 if (!str)
1937 return -EINVAL;
1938
1939 s = strdup(str);
1940 if (!s)
1941 return -ENOMEM;
1942
1943 p = strchr(s, ',');
1944 if (p)
1945 *p = '\0';
1946
1947 if (*s) {
1948 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
1949 if (ret)
1950 goto out_free;
1951 opts->mmap_pages = mmap_pages;
1952 }
1953
1954 if (!p) {
1955 ret = 0;
1956 goto out_free;
1957 }
1958
1959 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
1960 if (ret)
1961 goto out_free;
1962
1963 opts->auxtrace_mmap_pages = mmap_pages;
1964
1965out_free:
1966 free(s);
1967 return ret;
1968}
1969
Jiri Olsa0c582442017-01-09 10:51:59 +01001970static void switch_output_size_warn(struct record *rec)
1971{
1972 u64 wakeup_size = perf_evlist__mmap_size(rec->opts.mmap_pages);
1973 struct switch_output *s = &rec->switch_output;
1974
1975 wakeup_size /= 2;
1976
1977 if (s->size < wakeup_size) {
1978 char buf[100];
1979
1980 unit_number__scnprintf(buf, sizeof(buf), wakeup_size);
1981 pr_warning("WARNING: switch-output data size lower than "
1982 "wakeup kernel buffer size (%s) "
1983 "expect bigger perf.data sizes\n", buf);
1984 }
1985}
1986
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001987static int switch_output_setup(struct record *rec)
1988{
1989 struct switch_output *s = &rec->switch_output;
Jiri Olsadc0c6122017-01-09 10:51:58 +01001990 static struct parse_tag tags_size[] = {
1991 { .tag = 'B', .mult = 1 },
1992 { .tag = 'K', .mult = 1 << 10 },
1993 { .tag = 'M', .mult = 1 << 20 },
1994 { .tag = 'G', .mult = 1 << 30 },
1995 { .tag = 0 },
1996 };
Jiri Olsabfacbe32017-01-09 10:52:00 +01001997 static struct parse_tag tags_time[] = {
1998 { .tag = 's', .mult = 1 },
1999 { .tag = 'm', .mult = 60 },
2000 { .tag = 'h', .mult = 60*60 },
2001 { .tag = 'd', .mult = 60*60*24 },
2002 { .tag = 0 },
2003 };
Jiri Olsadc0c6122017-01-09 10:51:58 +01002004 unsigned long val;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002005
2006 if (!s->set)
2007 return 0;
2008
2009 if (!strcmp(s->str, "signal")) {
2010 s->signal = true;
2011 pr_debug("switch-output with SIGUSR2 signal\n");
Jiri Olsadc0c6122017-01-09 10:51:58 +01002012 goto enabled;
2013 }
2014
2015 val = parse_tag_value(s->str, tags_size);
2016 if (val != (unsigned long) -1) {
2017 s->size = val;
2018 pr_debug("switch-output with %s size threshold\n", s->str);
2019 goto enabled;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002020 }
2021
Jiri Olsabfacbe32017-01-09 10:52:00 +01002022 val = parse_tag_value(s->str, tags_time);
2023 if (val != (unsigned long) -1) {
2024 s->time = val;
2025 pr_debug("switch-output with %s time threshold (%lu seconds)\n",
2026 s->str, s->time);
2027 goto enabled;
2028 }
2029
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002030 return -1;
Jiri Olsadc0c6122017-01-09 10:51:58 +01002031
2032enabled:
2033 rec->timestamp_filename = true;
2034 s->enabled = true;
Jiri Olsa0c582442017-01-09 10:51:59 +01002035
2036 if (s->size && !rec->opts.no_buffering)
2037 switch_output_size_warn(rec);
2038
Jiri Olsadc0c6122017-01-09 10:51:58 +01002039 return 0;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002040}
2041
/* Usage lines printed by 'perf record -h'; exported via record_usage. */
static const char * const __record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};
/* NOTE(review): non-static so other builtins can reuse it — presumably builtin-script; verify callers. */
const char * const *record_usage = __record_usage;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002048
/*
 * XXX Ideally would be local to cmd_record() and passed to a record__new
 * because we need to have access to it in record__exit, that is called
 * after cmd_record() exits, but since record_options need to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't touch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct record record = {
	/* Default recording options; overridden by config file and CLI. */
	.opts = {
		.sample_time	     = true,
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
			.default_per_cpu = true,
		},
		.mmap_flush          = MMAP_FLUSH_DEFAULT,
	},
	/* Event delivery callbacks used while synthesizing/processing. */
	.tool = {
		.sample		= process_sample_event,
		.fork		= perf_event__process_fork,
		.exit		= perf_event__process_exit,
		.comm		= perf_event__process_comm,
		.namespaces	= perf_event__process_namespaces,
		.mmap		= perf_event__process_mmap,
		.mmap2		= perf_event__process_mmap2,
		.ordered_events	= true,
	},
};
Frederic Weisbecker7865e812010-04-14 19:42:07 +02002083
/* Help text for the --call-graph option, with the default appended. */
const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
	"\n\t\t\t\tDefault: fp";

/* Set by --dry-run: parse options/events but do not record. */
static bool dry_run;
2088
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002089/*
2090 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
2091 * with it and switch to use the library functions in perf_evlist that came
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03002092 * from builtin-record.c, i.e. use record_opts,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002093 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
2094 * using pipes, etc.
2095 */
Jiri Olsaefd21302017-01-03 09:19:55 +01002096static struct option __record_options[] = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002097 OPT_CALLBACK('e', "event", &record.evlist, "event",
Thomas Gleixner86847b62009-06-06 12:24:17 +02002098 "event selector. use 'perf list' to list available events",
Jiri Olsaf120f9d2011-07-14 11:25:32 +02002099 parse_events_option),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002100 OPT_CALLBACK(0, "filter", &record.evlist, "filter",
Li Zefanc171b552009-10-15 11:22:07 +08002101 "event filter", parse_filter),
Wang Nan4ba1faa2015-07-10 07:36:10 +00002102 OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
2103 NULL, "don't record events from perf itself",
2104 exclude_perf),
Namhyung Kimbea03402012-04-26 14:15:15 +09002105 OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03002106 "record events on existing process id"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002107 OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03002108 "record events on existing thread id"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002109 OPT_INTEGER('r', "realtime", &record.realtime_prio,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002110 "collect data with this RT SCHED_FIFO priority"),
Arnaldo Carvalho de Melo509051e2014-01-14 17:52:14 -03002111 OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
Kirill Smelkovacac03f2011-01-12 17:59:36 +03002112 "collect data without buffering"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002113 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
Frederic Weisbeckerdaac07b2009-08-13 10:27:19 +02002114 "collect raw sample records from all opened counters"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002115 OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002116 "system-wide collection from all CPUs"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002117 OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
Stephane Eranianc45c6ea2010-05-28 12:00:01 +02002118 "list of cpus to monitor"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002119 OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
Jiri Olsa2d4f2792019-02-21 10:41:30 +01002120 OPT_STRING('o', "output", &record.data.path, "file",
Ingo Molnarabaff322009-06-02 22:59:57 +02002121 "output file name"),
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02002122 OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
2123 &record.opts.no_inherit_set,
2124 "child tasks do not inherit counters"),
Wang Nan4ea648a2016-07-14 08:34:47 +00002125 OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
2126 "synthesize non-sample events at the end of output"),
Wang Nan626a6b72016-07-14 08:34:45 +00002127 OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
Song Liu71184c62019-03-11 22:30:37 -07002128 OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "record bpf events"),
Arnaldo Carvalho de Melob09c2362018-03-01 14:52:50 -03002129 OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
2130 "Fail if the specified frequency can't be used"),
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03002131 OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
2132 "profile at this frequency",
2133 record__parse_freq),
Adrian Huntere9db1312015-04-09 18:53:46 +03002134 OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
2135 "number of mmap data pages and AUX area tracing mmap pages",
2136 record__parse_mmap_pages),
Alexey Budankov470530b2019-03-18 20:40:26 +03002137 OPT_CALLBACK(0, "mmap-flush", &record.opts, "number",
2138 "Minimal number of bytes that is extracted from mmap data pages (default: 1)",
2139 record__mmap_flush_parse),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002140 OPT_BOOLEAN(0, "group", &record.opts.group,
Lin Ming43bece72011-08-17 18:42:07 +08002141 "put the counters into a counter group"),
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03002142 OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002143 NULL, "enables call-graph recording" ,
2144 &record_callchain_opt),
2145 OPT_CALLBACK(0, "call-graph", &record.opts,
Namhyung Kim76a26542015-10-22 23:28:32 +09002146 "record_mode[,record_size]", record_callchain_help,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002147 &record_parse_callchain_opt),
Ian Munsiec0555642010-04-13 18:37:33 +10002148 OPT_INCR('v', "verbose", &verbose,
Ingo Molnar3da297a2009-06-07 17:39:02 +02002149 "be more verbose (show counter open errors, etc)"),
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02002150 OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002151 OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02002152 "per thread counts"),
Peter Zijlstra56100322015-06-10 16:48:50 +02002153 OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
Kan Liang3b0a5da2017-08-29 13:11:08 -04002154 OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
2155 "Record the sample physical addresses"),
Jiri Olsab6f35ed2016-08-01 20:02:35 +02002156 OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
Adrian Hunter3abebc52015-07-06 14:51:01 +03002157 OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
2158 &record.opts.sample_time_set,
2159 "Record the sample timestamps"),
Jiri Olsaf290aa12018-02-01 09:38:11 +01002160 OPT_BOOLEAN_SET('P', "period", &record.opts.period, &record.opts.period_set,
2161 "Record the sample period"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002162 OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02002163 "don't sample"),
Wang Nand2db9a92016-01-25 09:56:19 +00002164 OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
2165 &record.no_buildid_cache_set,
2166 "do not update the buildid cache"),
2167 OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
2168 &record.no_buildid_set,
2169 "do not collect buildids in perf.data"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002170 OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
Stephane Eranian023695d2011-02-14 11:20:01 +02002171 "monitor event in cgroup name only",
2172 parse_cgroups),
Arnaldo Carvalho de Meloa6205a32014-01-14 17:58:12 -03002173 OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
Andi Kleen6619a532014-01-11 13:38:27 -08002174 "ms to wait before starting measurement after program start"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002175 OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
2176 "user to profile"),
Stephane Eraniana5aabda2012-03-08 23:47:45 +01002177
2178 OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
2179 "branch any", "sample any taken branches",
2180 parse_branch_stack),
2181
2182 OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
2183 "branch filter mask", "branch stack filter modes",
Roberto Agostino Vitillobdfebd82012-02-09 23:21:02 +01002184 parse_branch_stack),
Andi Kleen05484292013-01-24 16:10:29 +01002185 OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
2186 "sample by weight (on special events only)"),
Andi Kleen475eeab2013-09-20 07:40:43 -07002187 OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
2188 "sample transaction flags (special events only)"),
Adrian Hunter3aa59392013-11-15 15:52:29 +02002189 OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
2190 "use per-thread mmaps"),
Stephane Eranianbcc84ec2015-08-31 18:41:12 +02002191 OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
2192 "sample selected machine registers on interrupt,"
Kan Liangaeea9062019-05-14 13:19:32 -07002193 " use '-I?' to list register names", parse_intr_regs),
Andi Kleen84c41742017-09-05 10:00:28 -07002194 OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
2195 "sample selected machine registers on interrupt,"
Kan Liangaeea9062019-05-14 13:19:32 -07002196 " use '--user-regs=?' to list register names", parse_user_regs),
Andi Kleen85c273d2015-02-24 15:13:40 -08002197 OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
2198 "Record running/enabled time of read (:S) events"),
Peter Zijlstra814c8c32015-03-31 00:19:31 +02002199 OPT_CALLBACK('k', "clockid", &record.opts,
2200 "clockid", "clockid to use for events, see clock_gettime()",
2201 parse_clockid),
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002202 OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
2203 "opts", "AUX area tracing Snapshot Mode", ""),
Mark Drayton3fcb10e2018-12-04 12:34:20 -08002204 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
Kan Liang9d9cad72015-06-17 09:51:11 -04002205 "per thread proc mmap processing timeout in ms"),
Hari Bathinif3b36142017-03-08 02:11:43 +05302206 OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
2207 "Record namespaces events"),
Adrian Hunterb757bb02015-07-21 12:44:04 +03002208 OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
2209 "Record context switch events"),
Jiri Olsa85723882016-02-15 09:34:31 +01002210 OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
2211 "Configure all used events to run in kernel space.",
2212 PARSE_OPT_EXCLUSIVE),
2213 OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
2214 "Configure all used events to run in user space.",
2215 PARSE_OPT_EXCLUSIVE),
yuzhoujian53651b22019-05-30 14:29:22 +01002216 OPT_BOOLEAN(0, "kernel-callchains", &record.opts.kernel_callchains,
2217 "collect kernel callchains"),
2218 OPT_BOOLEAN(0, "user-callchains", &record.opts.user_callchains,
2219 "collect user callchains"),
Wang Nan71dc23262015-10-14 12:41:19 +00002220 OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
2221 "clang binary to use for compiling BPF scriptlets"),
2222 OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
2223 "options passed to clang when compiling BPF scriptlets"),
He Kuang7efe0e02015-12-14 10:39:23 +00002224 OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
2225 "file", "vmlinux pathname"),
Namhyung Kim61566812016-01-11 22:37:09 +09002226 OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
2227 "Record build-id of all DSOs regardless of hits"),
Wang Nanecfd7a92016-04-13 08:21:07 +00002228 OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
2229 "append timestamp to output filename"),
Jin Yao68588ba2017-12-08 21:13:42 +08002230 OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
2231 "Record timestamp boundary (time of first/last samples)"),
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002232 OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
Andi Kleenc38dab72019-03-14 15:49:56 -07002233 &record.switch_output.set, "signal or size[BKMG] or time[smhd]",
2234 "Switch output when receiving SIGUSR2 (signal) or cross a size or time threshold",
Jiri Olsadc0c6122017-01-09 10:51:58 +01002235 "signal"),
Andi Kleen03724b22019-03-14 15:49:55 -07002236 OPT_INTEGER(0, "switch-max-files", &record.switch_output.num_files,
2237 "Limit number of switch output generated files"),
Wang Nan0aab2132016-06-16 08:02:41 +00002238 OPT_BOOLEAN(0, "dry-run", &dry_run,
2239 "Parse options then exit"),
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002240#ifdef HAVE_AIO_SUPPORT
Alexey Budankov93f20c02018-11-06 12:07:19 +03002241 OPT_CALLBACK_OPTARG(0, "aio", &record.opts,
2242 &nr_cblocks_default, "n", "Use <n> control blocks in asynchronous trace writing mode (default: 1, max: 4)",
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002243 record__aio_parse),
2244#endif
Alexey Budankovf4fe11b2019-01-22 20:52:03 +03002245 OPT_CALLBACK(0, "affinity", &record.opts, "node|cpu",
2246 "Set affinity mask of trace reading thread to NUMA node cpu mask or cpu of processed mmap buffer",
2247 record__parse_affinity),
Alexey Budankov504c1ad2019-03-18 20:44:42 +03002248#ifdef HAVE_ZSTD_SUPPORT
2249 OPT_CALLBACK_OPTARG('z', "compression-level", &record.opts, &comp_level_default,
2250 "n", "Compressed records using specified level (default: 1 - fastest compression, 22 - greatest compression)",
2251 record__parse_comp_level),
2252#endif
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002253 OPT_END()
2254};
2255
Namhyung Kime5b2c202014-10-23 00:15:46 +09002256struct option *record_options = __record_options;
2257
Arnaldo Carvalho de Melob0ad8ea2017-03-27 11:47:20 -03002258int cmd_record(int argc, const char **argv)
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002259{
Adrian Hunteref149c22015-04-09 18:53:45 +03002260 int err;
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03002261 struct record *rec = &record;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002262 char errbuf[BUFSIZ];
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002263
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03002264 setlocale(LC_ALL, "");
2265
Wang Nan48e1cab2015-12-14 10:39:22 +00002266#ifndef HAVE_LIBBPF_SUPPORT
2267# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
2268 set_nobuild('\0', "clang-path", true);
2269 set_nobuild('\0', "clang-opt", true);
2270# undef set_nobuild
2271#endif
2272
He Kuang7efe0e02015-12-14 10:39:23 +00002273#ifndef HAVE_BPF_PROLOGUE
2274# if !defined (HAVE_DWARF_SUPPORT)
2275# define REASON "NO_DWARF=1"
2276# elif !defined (HAVE_LIBBPF_SUPPORT)
2277# define REASON "NO_LIBBPF=1"
2278# else
2279# define REASON "this architecture doesn't support BPF prologue"
2280# endif
2281# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
2282 set_nobuild('\0', "vmlinux", true);
2283# undef set_nobuild
2284# undef REASON
2285#endif
2286
Alexey Budankov9d2ed642019-01-22 20:47:43 +03002287 CPU_ZERO(&rec->affinity_mask);
2288 rec->opts.affinity = PERF_AFFINITY_SYS;
2289
Jiri Olsa0f98b112019-07-21 13:23:55 +02002290 rec->evlist = evlist__new();
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03002291 if (rec->evlist == NULL)
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -02002292 return -ENOMEM;
2293
Arnaldo Carvalho de Meloecc4c562017-01-24 13:44:10 -03002294 err = perf_config(perf_record_config, rec);
2295 if (err)
2296 return err;
Jiri Olsaeb853e82014-02-03 12:44:42 +01002297
Tom Zanussibca647a2010-11-10 08:11:30 -06002298 argc = parse_options(argc, argv, record_options, record_usage,
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02002299 PARSE_OPT_STOP_AT_NON_OPTION);
Namhyung Kim68ba3232017-02-17 17:17:42 +09002300 if (quiet)
2301 perf_quiet_option();
Jiri Olsa483635a2017-02-17 18:00:18 +01002302
2303 /* Make system wide (-a) the default target. */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002304 if (!argc && target__none(&rec->opts.target))
Jiri Olsa483635a2017-02-17 18:00:18 +01002305 rec->opts.target.system_wide = true;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002306
Namhyung Kimbea03402012-04-26 14:15:15 +09002307 if (nr_cgroups && !rec->opts.target.system_wide) {
Namhyung Kimc7118362015-10-25 00:49:27 +09002308 usage_with_options_msg(record_usage, record_options,
2309 "cgroup monitoring only available in system-wide mode");
2310
Stephane Eranian023695d2011-02-14 11:20:01 +02002311 }
Alexey Budankov504c1ad2019-03-18 20:44:42 +03002312
2313 if (rec->opts.comp_level != 0) {
2314 pr_debug("Compression enabled, disabling build id collection at the end of the session.\n");
2315 rec->no_buildid = true;
2316 }
2317
Adrian Hunterb757bb02015-07-21 12:44:04 +03002318 if (rec->opts.record_switch_events &&
2319 !perf_can_record_switch_events()) {
Namhyung Kimc7118362015-10-25 00:49:27 +09002320 ui__error("kernel does not support recording context switch events\n");
2321 parse_options_usage(record_usage, record_options, "switch-events", 0);
2322 return -EINVAL;
Adrian Hunterb757bb02015-07-21 12:44:04 +03002323 }
Stephane Eranian023695d2011-02-14 11:20:01 +02002324
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002325 if (switch_output_setup(rec)) {
2326 parse_options_usage(record_usage, record_options, "switch-output", 0);
2327 return -EINVAL;
2328 }
2329
Jiri Olsabfacbe32017-01-09 10:52:00 +01002330 if (rec->switch_output.time) {
2331 signal(SIGALRM, alarm_sig_handler);
2332 alarm(rec->switch_output.time);
2333 }
2334
Andi Kleen03724b22019-03-14 15:49:55 -07002335 if (rec->switch_output.num_files) {
2336 rec->switch_output.filenames = calloc(sizeof(char *),
2337 rec->switch_output.num_files);
2338 if (!rec->switch_output.filenames)
2339 return -EINVAL;
2340 }
2341
Adrian Hunter1b36c032016-09-23 17:38:39 +03002342 /*
2343 * Allow aliases to facilitate the lookup of symbols for address
2344 * filters. Refer to auxtrace_parse_filters().
2345 */
2346 symbol_conf.allow_aliases = true;
2347
2348 symbol__init(NULL);
2349
Adrian Hunter4b5ea3b2018-03-06 11:13:12 +02002350 err = record__auxtrace_init(rec);
Adrian Hunter1b36c032016-09-23 17:38:39 +03002351 if (err)
2352 goto out;
2353
Wang Nan0aab2132016-06-16 08:02:41 +00002354 if (dry_run)
Adrian Hunter5c01ad602016-09-23 17:38:37 +03002355 goto out;
Wang Nan0aab2132016-06-16 08:02:41 +00002356
Wang Nand7888572016-04-08 15:07:24 +00002357 err = bpf__setup_stdout(rec->evlist);
2358 if (err) {
2359 bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
2360 pr_err("ERROR: Setup BPF stdout failed: %s\n",
2361 errbuf);
Adrian Hunter5c01ad602016-09-23 17:38:37 +03002362 goto out;
Wang Nand7888572016-04-08 15:07:24 +00002363 }
2364
Adrian Hunteref149c22015-04-09 18:53:45 +03002365 err = -ENOMEM;
2366
Arnaldo Carvalho de Melo6c443952017-11-14 11:03:19 -03002367 if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(rec->evlist))
Arnaldo Carvalho de Melo646aaea2011-05-27 11:00:41 -03002368 pr_warning(
2369"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
Igor Lubashevd06e5fa2019-08-26 21:39:16 -04002370"check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
Arnaldo Carvalho de Melo646aaea2011-05-27 11:00:41 -03002371"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
2372"file is not found in the buildid cache or in the vmlinux path.\n\n"
2373"Samples in kernel modules won't be resolved at all.\n\n"
2374"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
2375"even with a suitable vmlinux or kallsyms file.\n\n");
Arnaldo Carvalho de Meloec80fde2011-05-26 09:53:51 -03002376
Wang Nan0c1d46a2016-04-20 18:59:52 +00002377 if (rec->no_buildid_cache || rec->no_buildid) {
Stephane Eraniana1ac1d32010-06-17 11:39:01 +02002378 disable_buildid_cache();
Jiri Olsadc0c6122017-01-09 10:51:58 +01002379 } else if (rec->switch_output.enabled) {
Wang Nan0c1d46a2016-04-20 18:59:52 +00002380 /*
2381 * In 'perf record --switch-output', disable buildid
2382 * generation by default to reduce data file switching
2383 * overhead. Still generate buildid if they are required
2384 * explicitly using
2385 *
Jiri Olsa60437ac2017-01-03 09:19:56 +01002386 * perf record --switch-output --no-no-buildid \
Wang Nan0c1d46a2016-04-20 18:59:52 +00002387 * --no-no-buildid-cache
2388 *
2389 * Following code equals to:
2390 *
2391 * if ((rec->no_buildid || !rec->no_buildid_set) &&
2392 * (rec->no_buildid_cache || !rec->no_buildid_cache_set))
2393 * disable_buildid_cache();
2394 */
2395 bool disable = true;
2396
2397 if (rec->no_buildid_set && !rec->no_buildid)
2398 disable = false;
2399 if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
2400 disable = false;
2401 if (disable) {
2402 rec->no_buildid = true;
2403 rec->no_buildid_cache = true;
2404 disable_buildid_cache();
2405 }
2406 }
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02002407
Wang Nan4ea648a2016-07-14 08:34:47 +00002408 if (record.opts.overwrite)
2409 record.opts.tail_synthesize = true;
2410
Jiri Olsa6484d2f2019-07-21 13:24:28 +02002411 if (rec->evlist->core.nr_entries == 0 &&
Arnaldo Carvalho de Melo4b4cd502017-07-03 13:26:32 -03002412 __perf_evlist__add_default(rec->evlist, !record.opts.no_samples) < 0) {
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02002413 pr_err("Not enough memory for event selector list\n");
Adrian Hunter394c01e2016-09-23 17:38:36 +03002414 goto out;
Peter Zijlstrabbd36e52009-06-11 23:11:50 +02002415 }
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002416
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02002417 if (rec->opts.target.tid && !rec->opts.no_inherit_set)
2418 rec->opts.no_inherit = true;
2419
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002420 err = target__validate(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002421 if (err) {
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002422 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Jiri Olsac3dec272018-02-06 19:17:58 +01002423 ui__warning("%s\n", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002424 }
Namhyung Kim4bd0f2d2012-04-26 14:15:18 +09002425
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002426 err = target__parse_uid(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002427 if (err) {
2428 int saved_errno = errno;
2429
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002430 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Namhyung Kim3780f482012-05-29 13:22:57 +09002431 ui__error("%s", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002432
2433 err = -saved_errno;
Adrian Hunter394c01e2016-09-23 17:38:36 +03002434 goto out;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002435 }
Arnaldo Carvalho de Melo0d37aa32012-01-19 14:08:15 -02002436
Mengting Zhangca800062017-12-13 15:01:53 +08002437 /* Enable ignoring missing threads when -u/-p option is defined. */
2438 rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;
Jiri Olsa23dc4f12016-12-12 11:35:43 +01002439
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002440 err = -ENOMEM;
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03002441 if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
Arnaldo Carvalho de Melodd7927f2011-01-12 14:28:51 -02002442 usage_with_options(record_usage, record_options);
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02002443
Adrian Hunteref149c22015-04-09 18:53:45 +03002444 err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
2445 if (err)
Adrian Hunter394c01e2016-09-23 17:38:36 +03002446 goto out;
Adrian Hunteref149c22015-04-09 18:53:45 +03002447
Namhyung Kim61566812016-01-11 22:37:09 +09002448 /*
2449 * We take all buildids when the file contains
2450 * AUX area tracing data because we do not decode the
2451 * trace because it would take too long.
2452 */
2453 if (rec->opts.full_auxtrace)
2454 rec->buildid_all = true;
2455
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03002456 if (record_opts__config(&rec->opts)) {
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03002457 err = -EINVAL;
Adrian Hunter394c01e2016-09-23 17:38:36 +03002458 goto out;
Mike Galbraith7e4ff9e2009-10-12 07:56:03 +02002459 }
2460
Alexey Budankov93f20c02018-11-06 12:07:19 +03002461 if (rec->opts.nr_cblocks > nr_cblocks_max)
2462 rec->opts.nr_cblocks = nr_cblocks_max;
Alexey Budankov5d7f4112019-03-18 20:43:35 +03002463 pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks);
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002464
Alexey Budankov9d2ed642019-01-22 20:47:43 +03002465 pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
Alexey Budankov470530b2019-03-18 20:40:26 +03002466 pr_debug("mmap flush: %d\n", rec->opts.mmap_flush);
Alexey Budankov9d2ed642019-01-22 20:47:43 +03002467
Alexey Budankov51255a82019-03-18 20:42:19 +03002468 if (rec->opts.comp_level > comp_level_max)
2469 rec->opts.comp_level = comp_level_max;
2470 pr_debug("comp level: %d\n", rec->opts.comp_level);
2471
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002472 err = __cmd_record(&record, argc, argv);
Adrian Hunter394c01e2016-09-23 17:38:36 +03002473out:
Jiri Olsac12995a2019-07-21 13:23:56 +02002474 evlist__delete(rec->evlist);
Arnaldo Carvalho de Melod65a4582010-07-30 18:31:28 -03002475 symbol__exit();
Adrian Hunteref149c22015-04-09 18:53:45 +03002476 auxtrace_record__free(rec->itr);
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03002477 return err;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002478}
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002479
2480static void snapshot_sig_handler(int sig __maybe_unused)
2481{
Jiri Olsadc0c6122017-01-09 10:51:58 +01002482 struct record *rec = &record;
2483
Wang Nan5f9cf592016-04-20 18:59:49 +00002484 if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
2485 trigger_hit(&auxtrace_snapshot_trigger);
2486 auxtrace_record__snapshot_started = 1;
2487 if (auxtrace_record__snapshot_start(record.itr))
2488 trigger_error(&auxtrace_snapshot_trigger);
2489 }
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002490
Jiri Olsadc0c6122017-01-09 10:51:58 +01002491 if (switch_output_signal(rec))
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002492 trigger_hit(&switch_output_trigger);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002493}
Jiri Olsabfacbe32017-01-09 10:52:00 +01002494
2495static void alarm_sig_handler(int sig __maybe_unused)
2496{
2497 struct record *rec = &record;
2498
2499 if (switch_output_time(rec))
2500 trigger_hit(&switch_output_trigger);
2501}