blob: b5063d3b6fd077fd8b5b4d35936f44cd6609bae7 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Ingo Molnarabaff322009-06-02 22:59:57 +02002/*
Ingo Molnarbf9e1872009-06-02 23:37:05 +02003 * builtin-record.c
4 *
5 * Builtin record command: Record the profile of a workload
6 * (or a CPU, or a PID) into the perf.data output file - for
7 * later analysis via perf report.
Ingo Molnarabaff322009-06-02 22:59:57 +02008 */
Ingo Molnar16f762a2009-05-27 09:10:38 +02009#include "builtin.h"
Ingo Molnarbf9e1872009-06-02 23:37:05 +020010
Arnaldo Carvalho de Melo6122e4e2010-02-03 16:52:05 -020011#include "util/build-id.h"
Josh Poimboeuf4b6ab942015-12-15 09:39:39 -060012#include <subcmd/parse-options.h>
Ingo Molnar8ad8db32009-05-26 11:10:09 +020013#include "util/parse-events.h"
Taeung Song41840d22016-06-23 17:55:17 +090014#include "util/config.h"
Thomas Gleixner6eda5832009-05-01 18:29:57 +020015
Arnaldo Carvalho de Melo8f651ea2014-10-09 16:12:24 -030016#include "util/callchain.h"
Arnaldo Carvalho de Melof14d5702014-10-17 12:17:40 -030017#include "util/cgroup.h"
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020018#include "util/header.h"
Frederic Weisbecker66e274f2009-08-12 11:07:25 +020019#include "util/event.h"
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -020020#include "util/evlist.h"
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -020021#include "util/evsel.h"
Frederic Weisbecker8f288272009-08-16 22:05:48 +020022#include "util/debug.h"
Arnaldo Carvalho de Meloe0fcfb02019-09-23 12:20:38 -030023#include "util/mmap.h"
Arnaldo Carvalho de Meloaeb00b12019-08-22 15:40:29 -030024#include "util/target.h"
Arnaldo Carvalho de Melo94c744b2009-12-11 21:24:02 -020025#include "util/session.h"
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -020026#include "util/tool.h"
Arnaldo Carvalho de Melo8d063672009-11-04 18:50:43 -020027#include "util/symbol.h"
Arnaldo Carvalho de Meloaeb00b12019-08-22 15:40:29 -030028#include "util/record.h"
Paul Mackerrasa12b51c2010-03-10 20:36:09 +110029#include "util/cpumap.h"
Arnaldo Carvalho de Melofd782602011-01-18 15:15:24 -020030#include "util/thread_map.h"
Jiri Olsaf5fc14122013-10-15 16:27:32 +020031#include "util/data.h"
Stephane Eranianbcc84ec2015-08-31 18:41:12 +020032#include "util/perf_regs.h"
Adrian Hunteref149c22015-04-09 18:53:45 +030033#include "util/auxtrace.h"
Adrian Hunter46bc29b2016-03-08 10:38:44 +020034#include "util/tsc.h"
Andi Kleenf00898f2015-05-27 10:51:51 -070035#include "util/parse-branch-options.h"
Stephane Eranianbcc84ec2015-08-31 18:41:12 +020036#include "util/parse-regs-options.h"
Wang Nan71dc23262015-10-14 12:41:19 +000037#include "util/llvm-utils.h"
Wang Nan8690a2a2016-02-22 09:10:32 +000038#include "util/bpf-loader.h"
Wang Nan5f9cf592016-04-20 18:59:49 +000039#include "util/trigger.h"
Wang Nana0748652016-11-26 07:03:28 +000040#include "util/perf-hooks.h"
Alexey Budankovf13de662019-01-22 20:50:57 +030041#include "util/cpu-set-sched.h"
Arnaldo Carvalho de Meloea49e012019-09-18 11:36:13 -030042#include "util/synthetic-events.h"
Arnaldo Carvalho de Meloc5e40272017-04-19 16:12:39 -030043#include "util/time-utils.h"
Arnaldo Carvalho de Melo58db1d62017-04-19 16:05:56 -030044#include "util/units.h"
Song Liu7b612e22019-01-17 08:15:19 -080045#include "util/bpf-event.h"
Wang Nand8871ea2016-02-26 09:32:06 +000046#include "asm/bug.h"
Arnaldo Carvalho de Meloc1a604d2019-08-29 15:20:59 -030047#include "perf.h"
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020048
Arnaldo Carvalho de Meloa43783a2017-04-18 10:46:11 -030049#include <errno.h>
Arnaldo Carvalho de Melofd20e812017-04-17 15:23:08 -030050#include <inttypes.h>
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -030051#include <locale.h>
Arnaldo Carvalho de Melo42087352017-04-19 19:06:30 -030052#include <poll.h>
Peter Zijlstra97124d5e2009-06-02 15:52:24 +020053#include <unistd.h>
Peter Zijlstrade9ac072009-04-08 15:01:31 +020054#include <sched.h>
Arnaldo Carvalho de Melo9607ad32017-04-19 15:49:18 -030055#include <signal.h>
Arnaldo Carvalho de Meloa41794c2010-05-18 18:29:23 -030056#include <sys/mman.h>
Arnaldo Carvalho de Melo42087352017-04-19 19:06:30 -030057#include <sys/wait.h>
Adrian Huntereeb399b2019-10-04 11:31:21 +030058#include <sys/types.h>
59#include <sys/stat.h>
60#include <fcntl.h>
Mamatha Inamdar6ef81c52019-08-22 12:50:49 +053061#include <linux/err.h>
Arnaldo Carvalho de Melo8520a982019-08-29 16:18:59 -030062#include <linux/string.h>
Arnaldo Carvalho de Melo0693e682016-08-08 15:05:46 -030063#include <linux/time64.h>
Arnaldo Carvalho de Melod8f9da22019-07-04 12:06:20 -030064#include <linux/zalloc.h>
Bernhard Rosenkraenzer78da39f2012-10-08 09:43:26 +030065
/*
 * State for rotating the output file while recording ("switch output").
 */
struct switch_output {
	bool		 enabled;	/* any rotation mode is active */
	bool		 signal;	/* rotate when the rotation signal arrives */
	unsigned long	 size;		/* rotate once this many bytes written (0 = off) */
	unsigned long	 time;		/* time-based rotation period (0 = off) */
	const char	*str;		/* raw option argument as given by the user */
	bool		 set;		/* option was present on the command line */
	char		 **filenames;	/* generated output names — presumably a ring; verify against users */
	int		 num_files;	/* capacity of filenames[] */
	int		 cur_file;	/* index of the current entry in filenames[] */
};
77
/*
 * Aggregate state for one "perf record" session: tool callbacks,
 * parsed options, the output file and runtime bookkeeping.
 */
struct record {
	struct perf_tool	tool;		/* event-processing callbacks */
	struct record_opts	opts;		/* parsed command-line options */
	u64			bytes_written;	/* payload written so far (see record__write()) */
	struct perf_data	data;		/* perf.data output abstraction */
	struct auxtrace_record	*itr;		/* AUX area tracing state, NULL until initialized */
	struct evlist		*evlist;	/* events being recorded */
	struct perf_session	*session;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_set;
	bool			no_buildid_cache;
	bool			no_buildid_cache_set;
	bool			buildid_all;
	bool			timestamp_filename;
	bool			timestamp_boundary;
	struct switch_output	switch_output;	/* output-file rotation settings */
	unsigned long long	samples;	/* count of mmap reads that produced data */
	cpu_set_t		affinity_mask;
	unsigned long		output_max_size;	/* = 0: unlimited */
};
Ingo Molnara21ca2c2009-06-06 09:58:57 +020099
/* Set from signal handlers or the size limit to request an orderly shutdown. */
static volatile int done;

/* Non-zero while an AUX area snapshot has been started but not yet consumed. */
static volatile int auxtrace_record__snapshot_started;
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
static DEFINE_TRIGGER(switch_output_trigger);

/* Human-readable names for the affinity modes, indexed by PERF_AFFINITY_*. */
static const char *affinity_tags[PERF_AFFINITY_MAX] = {
	"SYS", "NODE", "CPU"
};
109
Jiri Olsadc0c6122017-01-09 10:51:58 +0100110static bool switch_output_signal(struct record *rec)
111{
112 return rec->switch_output.signal &&
113 trigger_is_ready(&switch_output_trigger);
114}
115
116static bool switch_output_size(struct record *rec)
117{
118 return rec->switch_output.size &&
119 trigger_is_ready(&switch_output_trigger) &&
120 (rec->bytes_written >= rec->switch_output.size);
121}
122
Jiri Olsabfacbe32017-01-09 10:52:00 +0100123static bool switch_output_time(struct record *rec)
124{
125 return rec->switch_output.time &&
126 trigger_is_ready(&switch_output_trigger);
127}
128
Jiwei Sun6d575812019-10-22 16:09:01 +0800129static bool record__output_max_size_exceeded(struct record *rec)
130{
131 return rec->output_max_size &&
132 (rec->bytes_written >= rec->output_max_size);
133}
134
/*
 * Write @size bytes at @bf to the output file and account them.
 *
 * Side effects on success: bytes_written is bumped; if the configured
 * size limit is exceeded the session shutdown flag is raised (once),
 * and a size-based output rotation may be triggered.
 * Returns 0 on success, -1 on write failure.  @map is unused on this
 * synchronous path.
 */
static int record__write(struct record *rec, struct mmap *map __maybe_unused,
			 void *bf, size_t size)
{
	struct perf_data_file *file = &rec->session->data->file;

	if (perf_data_file__write(file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	rec->bytes_written += size;

	/* Only announce and request shutdown once, not on every later write. */
	if (record__output_max_size_exceeded(rec) && !done) {
		fprintf(stderr, "[ perf record: perf size limit reached (%" PRIu64 " KB),"
				" stopping session ]\n",
				rec->bytes_written >> 10);
		done = 1;
	}

	if (switch_output_size(rec))
		trigger_hit(&switch_output_trigger);

	return 0;
}
159
/* Forward declarations: defined further down, used by the AIO path below. */
static int record__aio_enabled(struct record *rec);
static int record__comp_enabled(struct record *rec);
static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
			    void *src, size_t src_size);
164
#ifdef HAVE_AIO_SUPPORT
/*
 * Queue one POSIX AIO write of @size bytes at @buf to @trace_fd at file
 * offset @off.  Retries while aio_write() fails with EAGAIN (queue full).
 * Returns 0 once queued; on any other error the control block is marked
 * free (aio_fildes = -1) and aio_write()'s failure code is returned.
 * No completion notification is requested (SIGEV_NONE): completion is
 * polled later via record__aio_complete().
 */
static int record__aio_write(struct aiocb *cblock, int trace_fd,
			     void *buf, size_t size, off_t off)
{
	int rc;

	cblock->aio_fildes = trace_fd;
	cblock->aio_buf = buf;
	cblock->aio_nbytes = size;
	cblock->aio_offset = off;
	cblock->aio_sigevent.sigev_notify = SIGEV_NONE;

	do {
		rc = aio_write(cblock);
		if (rc == 0) {
			break;
		} else if (errno != EAGAIN) {
			cblock->aio_fildes = -1;
			pr_err("failed to queue perf data, error: %m\n");
			break;
		}
	} while (1);

	return rc;
}
190
/*
 * Poll one in-flight AIO request on @md.  Returns 0 while the request is
 * still in progress (or after re-queuing a partial write), 1 when it has
 * fully completed and its control block was freed.
 */
static int record__aio_complete(struct mmap *md, struct aiocb *cblock)
{
	void *rem_buf;
	off_t rem_off;
	size_t rem_size;
	int rc, aio_errno;
	ssize_t aio_ret, written;

	aio_errno = aio_error(cblock);
	if (aio_errno == EINPROGRESS)
		return 0;

	written = aio_ret = aio_return(cblock);
	if (aio_ret < 0) {
		if (aio_errno != EINTR)
			pr_err("failed to write perf data, error: %m\n");
		/* treat a failed write as "nothing written" and retry below */
		written = 0;
	}

	rem_size = cblock->aio_nbytes - written;

	if (rem_size == 0) {
		cblock->aio_fildes = -1;
		/*
		 * md->refcount is incremented in record__aio_pushfn() for
		 * every aio write request started in record__aio_push() so
		 * decrement it because the request is now complete.
		 */
		perf_mmap__put(&md->core);
		rc = 1;
	} else {
		/*
		 * aio write request may require restart with the
		 * reminder if the kernel didn't write whole
		 * chunk at once.
		 */
		rem_off = cblock->aio_offset + written;
		rem_buf = (void *)(cblock->aio_buf + written);
		record__aio_write(cblock, cblock->aio_fildes,
				rem_buf, rem_size, rem_off);
		rc = 0;
	}

	return rc;
}
236
/*
 * Wait on @md's AIO control blocks.  With @sync_all, loop until every
 * in-flight request has completed, then return -1.  Otherwise return the
 * index of the first free control block, suspending (1ms granularity)
 * until one becomes available.
 */
static int record__aio_sync(struct mmap *md, bool sync_all)
{
	struct aiocb **aiocb = md->aio.aiocb;
	struct aiocb *cblocks = md->aio.cblocks;
	struct timespec timeout = { 0, 1000 * 1000 * 1 }; /* 1ms */
	int i, do_suspend;

	do {
		do_suspend = 0;
		for (i = 0; i < md->aio.nr_cblocks; ++i) {
			if (cblocks[i].aio_fildes == -1 || record__aio_complete(md, &cblocks[i])) {
				if (sync_all)
					aiocb[i] = NULL;
				else
					return i;
			} else {
				/*
				 * Started aio write is not complete yet
				 * so it has to be waited before the
				 * next allocation.
				 */
				aiocb[i] = &cblocks[i];
				do_suspend = 1;
			}
		}
		if (!do_suspend)
			return -1;

		while (aio_suspend((const struct aiocb **)aiocb, md->aio.nr_cblocks, &timeout)) {
			if (!(errno == EAGAIN || errno == EINTR))
				pr_err("failed to sync perf data, error: %m\n");
		}
	} while (1);
}
271
/* Context handed from record__aio_push() to record__aio_pushfn(). */
struct record_aio {
	struct record	*rec;	/* owning record session */
	void		*data;	/* staging buffer (map->aio.data[idx]) */
	size_t		size;	/* bytes staged into data so far */
};
277
/*
 * perf_mmap__push() callback for the AIO path: copy (and optionally
 * compress) one chunk of the kernel ring buffer into the staging buffer
 * described by @to.  Returns the number of bytes staged.
 */
static int record__aio_pushfn(struct mmap *map, void *to, void *buf, size_t size)
{
	struct record_aio *aio = to;

	/*
	 * map->core.base data pointed by buf is copied into free map->aio.data[] buffer
	 * to release space in the kernel buffer as fast as possible, calling
	 * perf_mmap__consume() from perf_mmap__push() function.
	 *
	 * That lets the kernel to proceed with storing more profiling data into
	 * the kernel buffer earlier than other per-cpu kernel buffers are handled.
	 *
	 * Coping can be done in two steps in case the chunk of profiling data
	 * crosses the upper bound of the kernel buffer. In this case we first move
	 * part of data from map->start till the upper bound and then the reminder
	 * from the beginning of the kernel buffer till the end of the data chunk.
	 */

	if (record__comp_enabled(aio->rec)) {
		size = zstd_compress(aio->rec->session, aio->data + aio->size,
				     mmap__mmap_len(map) - aio->size,
				     buf, size);
	} else {
		memcpy(aio->data + aio->size, buf, size);
	}

	if (!aio->size) {
		/*
		 * Increment map->refcount to guard map->aio.data[] buffer
		 * from premature deallocation because map object can be
		 * released earlier than aio write request started on
		 * map->aio.data[] buffer is complete.
		 *
		 * perf_mmap__put() is done at record__aio_complete()
		 * after started aio request completion or at record__aio_push()
		 * if the request failed to start.
		 */
		perf_mmap__get(&map->core);
	}

	aio->size += size;

	return size;
}
322
/*
 * Drain @map asynchronously: stage its data into a free AIO buffer and
 * queue an asynchronous write at file offset *@off.  On success *@off
 * and the written-bytes counter advance.  Propagates perf_mmap__push()'s
 * result (ret > 0: no data, ret < 0: error) or record__aio_write()'s.
 */
static int record__aio_push(struct record *rec, struct mmap *map, off_t *off)
{
	int ret, idx;
	int trace_fd = rec->session->data->file.fd;
	struct record_aio aio = { .rec = rec, .size = 0 };

	/*
	 * Call record__aio_sync() to wait till map->aio.data[] buffer
	 * becomes available after previous aio write operation.
	 */

	idx = record__aio_sync(map, false);
	aio.data = map->aio.data[idx];
	ret = perf_mmap__push(map, &aio, record__aio_pushfn);
	if (ret != 0) /* ret > 0 - no data, ret < 0 - error */
		return ret;

	rec->samples++;
	ret = record__aio_write(&(map->aio.cblocks[idx]), trace_fd, aio.data, aio.size, *off);
	if (!ret) {
		*off += aio.size;
		rec->bytes_written += aio.size;
		if (switch_output_size(rec))
			trigger_hit(&switch_output_trigger);
	} else {
		/*
		 * Decrement map->refcount incremented in record__aio_pushfn()
		 * back if record__aio_write() operation failed to start, otherwise
		 * map->refcount is decremented in record__aio_complete() after
		 * aio write operation finishes successfully.
		 */
		perf_mmap__put(&map->core);
	}

	return ret;
}
359
/* Current file offset of @trace_fd, or (off_t)-1 on error. */
static off_t record__aio_get_pos(int trace_fd)
{
	off_t cur_off = lseek(trace_fd, 0, SEEK_CUR);

	return cur_off;
}
364
/* Reposition @trace_fd to the absolute offset @pos; errors are ignored. */
static void record__aio_set_pos(int trace_fd, off_t pos)
{
	(void)lseek(trace_fd, pos, SEEK_SET);
}
369
370static void record__aio_mmap_read_sync(struct record *rec)
371{
372 int i;
Jiri Olsa63503db2019-07-21 13:23:52 +0200373 struct evlist *evlist = rec->evlist;
Jiri Olsaa5830532019-07-27 20:30:53 +0200374 struct mmap *maps = evlist->mmap;
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300375
Alexey Budankovef781122019-03-18 20:44:12 +0300376 if (!record__aio_enabled(rec))
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300377 return;
378
Jiri Olsac976ee12019-07-30 13:04:59 +0200379 for (i = 0; i < evlist->core.nr_mmaps; i++) {
Jiri Olsaa5830532019-07-27 20:30:53 +0200380 struct mmap *map = &maps[i];
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300381
Jiri Olsa547740f2019-07-27 22:07:44 +0200382 if (map->core.base)
Alexey Budankov93f20c02018-11-06 12:07:19 +0300383 record__aio_sync(map, true);
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300384 }
385}
386
static int nr_cblocks_default = 1;	/* AIO depth used when the option gives no value */
static int nr_cblocks_max = 4;		/* hard cap on the AIO depth */

/*
 * Option callback for the AIO depth: unset disables AIO (0 control
 * blocks); a missing or zero value falls back to nr_cblocks_default.
 */
static int record__aio_parse(const struct option *opt,
			     const char *str,
			     int unset)
{
	struct record_opts *opts = (struct record_opts *)opt->value;

	if (unset) {
		opts->nr_cblocks = 0;
	} else {
		if (str)
			opts->nr_cblocks = strtol(str, NULL, 0);
		if (!opts->nr_cblocks)
			opts->nr_cblocks = nr_cblocks_default;
	}

	return 0;
}
#else /* HAVE_AIO_SUPPORT */
static int nr_cblocks_max = 0;

/*
 * Stubs for builds without POSIX AIO: the push path reports failure so
 * callers fall back, and the file-position helpers are no-ops.
 */
static int record__aio_push(struct record *rec __maybe_unused, struct mmap *map __maybe_unused,
			    off_t *off __maybe_unused)
{
	return -1;
}

static off_t record__aio_get_pos(int trace_fd __maybe_unused)
{
	return -1;
}

static void record__aio_set_pos(int trace_fd __maybe_unused, off_t pos __maybe_unused)
{
}

static void record__aio_mmap_read_sync(struct record *rec __maybe_unused)
{
}
#endif
429
430static int record__aio_enabled(struct record *rec)
431{
432 return rec->opts.nr_cblocks > 0;
433}
434
#define MMAP_FLUSH_DEFAULT 1
/*
 * Option callback for the mmap flush threshold: accepts a plain number
 * or a B/K/M/G-suffixed size.  Zero/absent falls back to
 * MMAP_FLUSH_DEFAULT, and the result is clamped to a quarter of the
 * mmap buffer size.
 */
static int record__mmap_flush_parse(const struct option *opt,
				    const char *str,
				    int unset)
{
	int flush_max;
	struct record_opts *opts = (struct record_opts *)opt->value;
	static struct parse_tag tags[] = {
			{ .tag = 'B', .mult = 1 },
			{ .tag = 'K', .mult = 1 << 10 },
			{ .tag = 'M', .mult = 1 << 20 },
			{ .tag = 'G', .mult = 1 << 30 },
			{ .tag = 0 },
	};

	if (unset)
		return 0;

	if (str) {
		opts->mmap_flush = parse_tag_value(str, tags);
		/* no recognized suffix: treat the argument as a plain number */
		if (opts->mmap_flush == (int)-1)
			opts->mmap_flush = strtol(str, NULL, 0);
	}

	if (!opts->mmap_flush)
		opts->mmap_flush = MMAP_FLUSH_DEFAULT;

	flush_max = evlist__mmap_size(opts->mmap_pages);
	flush_max /= 4;
	if (opts->mmap_flush > flush_max)
		opts->mmap_flush = flush_max;

	return 0;
}
469
#ifdef HAVE_ZSTD_SUPPORT
static unsigned int comp_level_default = 1;	/* level used when the option gives no value */

/*
 * Option callback for the compression level: unset disables compression
 * (level 0); a missing or zero value falls back to comp_level_default.
 */
static int record__parse_comp_level(const struct option *opt, const char *str, int unset)
{
	struct record_opts *opts = opt->value;

	if (unset) {
		opts->comp_level = 0;
	} else {
		if (str)
			opts->comp_level = strtol(str, NULL, 0);
		if (!opts->comp_level)
			opts->comp_level = comp_level_default;
	}

	return 0;
}
#endif
/* Highest accepted compression level (22 is zstd's maximum level). */
static unsigned int comp_level_max = 22;

/* Non-zero when trace payload should be compressed before writing. */
static int record__comp_enabled(struct record *rec)
{
	return rec->opts.comp_level > 0;
}
495
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -0200496static int process_synthesized_event(struct perf_tool *tool,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -0200497 union perf_event *event,
Irina Tirdea1d037ca2012-09-11 01:15:03 +0300498 struct perf_sample *sample __maybe_unused,
499 struct machine *machine __maybe_unused)
Arnaldo Carvalho de Melo234fbbf2009-10-26 19:23:18 -0200500{
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -0300501 struct record *rec = container_of(tool, struct record, tool);
Jiri Olsaded2b8f2018-09-13 14:54:06 +0200502 return record__write(rec, NULL, event, event->header.size);
Arnaldo Carvalho de Melo234fbbf2009-10-26 19:23:18 -0200503}
504
/*
 * perf_mmap__push() callback for the synchronous write path: optionally
 * compress the chunk into map->data, then append it to the output file.
 */
static int record__pushfn(struct mmap *map, void *to, void *bf, size_t size)
{
	struct record *rec = to;

	if (record__comp_enabled(rec)) {
		size = zstd_compress(rec->session, map->data, mmap__mmap_len(map), bf, size);
		bf = map->data;
	}

	rec->samples++;
	return record__write(rec, map, bf, size);
}
517
static volatile int signr = -1;		/* deferred signal to re-raise at exit (-1: none) */
static volatile int child_finished;	/* SIGCHLD seen: the forked workload exited */

/*
 * Async-signal handler: record what happened and ask the main loop to
 * wind down; no non-async-signal-safe work is done here.
 */
static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
}
530
/* SIGSEGV handler: let perf hooks recover first, then dump a stack trace. */
static void sigsegv_handler(int sig)
{
	perf_hooks__recover();
	sighandler_dump_stack(sig);
}
536
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300537static void record__sig_exit(void)
538{
539 if (signr == -1)
540 return;
541
542 signal(signr, SIG_DFL);
543 raise(signr);
544}
545
Adrian Huntere31f0d02015-04-30 17:37:27 +0300546#ifdef HAVE_AUXTRACE_SUPPORT
547
/*
 * Emit one AUX area trace record: the auxtrace event header, the
 * (possibly split) trace data in @data1/@data2, and padding up to an
 * 8-byte boundary.  For seekable single-file output the event's file
 * offset is recorded in the auxtrace index first.
 */
static int record__process_auxtrace(struct perf_tool *tool,
				    struct mmap *map,
				    union perf_event *event, void *data1,
				    size_t len1, void *data2, size_t len2)
{
	struct record *rec = container_of(tool, struct record, tool);
	struct perf_data *data = &rec->data;
	size_t padding;
	u8 pad[8] = {0};

	if (!perf_data__is_pipe(data) && perf_data__is_single_file(data)) {
		off_t file_offset;
		int fd = perf_data__fd(data);
		int err;

		file_offset = lseek(fd, 0, SEEK_CUR);
		if (file_offset == -1)
			return -1;
		err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
						     event, file_offset);
		if (err)
			return err;
	}

	/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
	padding = (len1 + len2) & 7;
	if (padding)
		padding = 8 - padding;

	record__write(rec, map, event, event->header.size);
	record__write(rec, map, data1, len1);
	if (len2)
		record__write(rec, map, data2, len2);
	record__write(rec, map, &pad, padding);

	return 0;
}
585
586static int record__auxtrace_mmap_read(struct record *rec,
Jiri Olsaa5830532019-07-27 20:30:53 +0200587 struct mmap *map)
Adrian Hunteref149c22015-04-09 18:53:45 +0300588{
589 int ret;
590
Jiri Olsae035f4c2018-09-13 14:54:05 +0200591 ret = auxtrace_mmap__read(map, rec->itr, &rec->tool,
Adrian Hunteref149c22015-04-09 18:53:45 +0300592 record__process_auxtrace);
593 if (ret < 0)
594 return ret;
595
596 if (ret)
597 rec->samples++;
598
599 return 0;
600}
601
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300602static int record__auxtrace_mmap_read_snapshot(struct record *rec,
Jiri Olsaa5830532019-07-27 20:30:53 +0200603 struct mmap *map)
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300604{
605 int ret;
606
Jiri Olsae035f4c2018-09-13 14:54:05 +0200607 ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool,
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300608 record__process_auxtrace,
609 rec->opts.auxtrace_snapshot_size);
610 if (ret < 0)
611 return ret;
612
613 if (ret)
614 rec->samples++;
615
616 return 0;
617}
618
619static int record__auxtrace_read_snapshot_all(struct record *rec)
620{
621 int i;
622 int rc = 0;
623
Jiri Olsac976ee12019-07-30 13:04:59 +0200624 for (i = 0; i < rec->evlist->core.nr_mmaps; i++) {
Jiri Olsaa5830532019-07-27 20:30:53 +0200625 struct mmap *map = &rec->evlist->mmap[i];
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300626
Jiri Olsae035f4c2018-09-13 14:54:05 +0200627 if (!map->auxtrace_mmap.base)
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300628 continue;
629
Jiri Olsae035f4c2018-09-13 14:54:05 +0200630 if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300631 rc = -1;
632 goto out;
633 }
634 }
635out:
636 return rc;
637}
638
Alexander Shishkince7b0e42019-08-06 17:41:01 +0300639static void record__read_auxtrace_snapshot(struct record *rec, bool on_exit)
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300640{
641 pr_debug("Recording AUX area tracing snapshot\n");
642 if (record__auxtrace_read_snapshot_all(rec) < 0) {
Wang Nan5f9cf592016-04-20 18:59:49 +0000643 trigger_error(&auxtrace_snapshot_trigger);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300644 } else {
Alexander Shishkince7b0e42019-08-06 17:41:01 +0300645 if (auxtrace_record__snapshot_finish(rec->itr, on_exit))
Wang Nan5f9cf592016-04-20 18:59:49 +0000646 trigger_error(&auxtrace_snapshot_trigger);
647 else
648 trigger_ready(&auxtrace_snapshot_trigger);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300649 }
650}
651
Alexander Shishkince7b0e42019-08-06 17:41:01 +0300652static int record__auxtrace_snapshot_exit(struct record *rec)
653{
654 if (trigger_is_error(&auxtrace_snapshot_trigger))
655 return 0;
656
657 if (!auxtrace_record__snapshot_started &&
658 auxtrace_record__snapshot_start(rec->itr))
659 return -1;
660
661 record__read_auxtrace_snapshot(rec, true);
662 if (trigger_is_error(&auxtrace_snapshot_trigger))
663 return -1;
664
665 return 0;
666}
667
/*
 * Set up AUX area tracing for the session: allocate the recorder if not
 * already present, then parse the snapshot options, sample options and
 * event filters.  Returns 0 on success or the first stage's error code.
 */
static int record__auxtrace_init(struct record *rec)
{
	int err;

	if (!rec->itr) {
		rec->itr = auxtrace_record__init(rec->evlist, &err);
		if (err)
			return err;
	}

	err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
					      rec->opts.auxtrace_snapshot_opts);
	if (err)
		return err;

	err = auxtrace_parse_sample_options(rec->itr, rec->evlist, &rec->opts,
					    rec->opts.auxtrace_sample_opts);
	if (err)
		return err;

	return auxtrace_parse_filters(rec->evlist);
}
690
#else

/*
 * Stubs for builds without AUX area tracing support: every operation
 * trivially succeeds and snapshots are no-ops.
 */
static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
			       struct mmap *map __maybe_unused)
{
	return 0;
}

static inline
void record__read_auxtrace_snapshot(struct record *rec __maybe_unused,
				    bool on_exit __maybe_unused)
{
}

static inline
int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
{
	return 0;
}

static inline
int record__auxtrace_snapshot_exit(struct record *rec __maybe_unused)
{
	return 0;
}

static int record__auxtrace_init(struct record *rec __maybe_unused)
{
	return 0;
}

#endif
724
Adrian Huntereeb399b2019-10-04 11:31:21 +0300725static bool record__kcore_readable(struct machine *machine)
726{
727 char kcore[PATH_MAX];
728 int fd;
729
730 scnprintf(kcore, sizeof(kcore), "%s/proc/kcore", machine->root_dir);
731
732 fd = open(kcore, O_RDONLY);
733 if (fd < 0)
734 return false;
735
736 close(fd);
737
738 return true;
739}
740
741static int record__kcore_copy(struct machine *machine, struct perf_data *data)
742{
743 char from_dir[PATH_MAX];
744 char kcore_dir[PATH_MAX];
745 int ret;
746
747 snprintf(from_dir, sizeof(from_dir), "%s/proc", machine->root_dir);
748
749 ret = perf_data__make_kcore_dir(data, kcore_dir, sizeof(kcore_dir));
750 if (ret)
751 return ret;
752
753 return kcore_copy(from_dir, kcore_dir);
754}
755
/*
 * mmap the ring buffers for @evlist per the record options (AIO depth,
 * affinity, flush threshold, compression level).  AUX area buffers are
 * mapped overwritable in snapshot or sample mode.  Returns 0 on success
 * or a negative errno-style value; the common EPERM (mlock limit) case
 * gets a dedicated hint for the user.
 */
static int record__mmap_evlist(struct record *rec,
			       struct evlist *evlist)
{
	struct record_opts *opts = &rec->opts;
	bool auxtrace_overwrite = opts->auxtrace_snapshot_mode ||
				  opts->auxtrace_sample_mode;
	char msg[512];

	/* Non-default affinity needs the cpu -> node map set up first. */
	if (opts->affinity != PERF_AFFINITY_SYS)
		cpu__setup_cpunode_map();

	if (evlist__mmap_ex(evlist, opts->mmap_pages,
				 opts->auxtrace_mmap_pages,
				 auxtrace_overwrite,
				 opts->nr_cblocks, opts->affinity,
				 opts->mmap_flush, opts->comp_level) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u,%u)\n",
			       opts->mmap_pages, opts->auxtrace_mmap_pages);
			return -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno,
				str_error_r(errno, msg, sizeof(msg)));
			if (errno)
				return -errno;
			else
				return -EINVAL;
		}
	}
	return 0;
}
791
/* Convenience wrapper: mmap the record session's own evlist. */
static int record__mmap(struct record *rec)
{
	return record__mmap_evlist(rec, rec->evlist);
}
796
/*
 * Open all events in the session's evlist, with fallback handling
 * (e.g. retrying with a downgraded event on EACCES/ENOENT and degrading
 * weak groups), apply event filters and mmap the ring buffers.
 *
 * Returns 0 on success, negative error code on failure.
 */
static int record__open(struct record *rec)
{
	char msg[BUFSIZ];
	struct evsel *pos;
	struct evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	int rc = 0;

	/*
	 * For initial_delay we need to add a dummy event so that we can track
	 * PERF_RECORD_MMAP while we wait for the initial delay to enable the
	 * real events, the ones asked by the user.
	 */
	if (opts->initial_delay) {
		if (perf_evlist__add_dummy(evlist))
			return -ENOMEM;

		/* First event becomes the plain dummy, last one does the tracking */
		pos = evlist__first(evlist);
		pos->tracking = 0;
		pos = evlist__last(evlist);
		pos->tracking = 1;
		pos->core.attr.enable_on_exec = 1;
	}

	perf_evlist__config(evlist, opts, &callchain_param);

	evlist__for_each_entry(evlist, pos) {
try_again:
		if (evsel__open(pos, pos->core.cpus, pos->core.threads) < 0) {
			/* Try a downgraded/alternative event config first */
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose > 0)
					ui__warning("%s\n", msg);
				goto try_again;
			}
			/*
			 * A member of a weak group failed: break the group up
			 * and retry the events standalone.
			 */
			if ((errno == EINVAL || errno == EBADF) &&
			    pos->leader != pos &&
			    pos->weak_group) {
			        pos = perf_evlist__reset_weak_group(evlist, pos);
				goto try_again;
			}
			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}

		pos->supported = true;
	}

	if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(evlist)) {
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");
	}

	if (perf_evlist__apply_filters(evlist, &pos)) {
		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
			pos->filter, perf_evsel__name(pos), errno,
			str_error_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	rc = record__mmap(rec);
	if (rc)
		goto out;

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}
876
Namhyung Kime3d59112015-01-29 17:06:44 +0900877static int process_sample_event(struct perf_tool *tool,
878 union perf_event *event,
879 struct perf_sample *sample,
Jiri Olsa32dcd022019-07-21 13:23:51 +0200880 struct evsel *evsel,
Namhyung Kime3d59112015-01-29 17:06:44 +0900881 struct machine *machine)
882{
883 struct record *rec = container_of(tool, struct record, tool);
884
Jin Yao68588ba2017-12-08 21:13:42 +0800885 if (rec->evlist->first_sample_time == 0)
886 rec->evlist->first_sample_time = sample->time;
Namhyung Kime3d59112015-01-29 17:06:44 +0900887
Jin Yao68588ba2017-12-08 21:13:42 +0800888 rec->evlist->last_sample_time = sample->time;
889
890 if (rec->buildid_all)
891 return 0;
892
893 rec->samples++;
Namhyung Kime3d59112015-01-29 17:06:44 +0900894 return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
895}
896
/*
 * Walk the just-recorded data to mark the DSOs hit by samples, so only
 * their build-ids are written into the perf.data header.
 *
 * Returns 0 on success (also when the file is empty), negative otherwise.
 */
static int process_buildids(struct record *rec)
{
	struct perf_session *session = rec->session;

	/* Nothing recorded, nothing to mark. */
	if (perf_data__size(&rec->data) == 0)
		return 0;

	/*
	 * During this process, it'll load kernel map and replace the
	 * dso->long_name to a real pathname it found. In this case
	 * we prefer the vmlinux path like
	 * /lib/modules/3.16.4/build/vmlinux
	 *
	 * rather than build-id path (in debug directory).
	 * $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
	 */
	symbol_conf.ignore_vmlinux_buildid = true;

	/*
	 * If --buildid-all is given, it marks all DSO regardless of hits,
	 * so no need to process samples. But if timestamp_boundary is enabled,
	 * it still needs to walk on all samples to get the timestamps of
	 * first/last samples.
	 */
	if (rec->buildid_all && !rec->timestamp_boundary)
		rec->tool.sample = NULL;

	return perf_session__process_events(session);
}
926
/*
 * Synthesize kernel mmap and module mmap events for a guest @machine so
 * guest-side addresses can be resolved at report time.  Errors are
 * reported but not fatal.  @data is the perf_tool (callback signature
 * imposed by machines__process_guests()).
 */
static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * As for guest kernel when processing subcommand record&report,
	 * we arrange module mmap prior to guest kernel mmap and trigger
	 * a preload dso because default guest module symbols are loaded
	 * from guest kallsyms instead of /lib/modules/XXX/XXX. This
	 * method is used to avoid symbol missing when the first addr is
	 * in module instead of in guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
	 * have no _text sometimes.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}
955
/*
 * Marker event appended after each pass over the mmap buffers; it lets
 * the report side flush and time-sort everything written before it.
 */
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};
960
Jiri Olsaa5830532019-07-27 20:30:53 +0200961static void record__adjust_affinity(struct record *rec, struct mmap *map)
Alexey Budankovf13de662019-01-22 20:50:57 +0300962{
963 if (rec->opts.affinity != PERF_AFFINITY_SYS &&
964 !CPU_EQUAL(&rec->affinity_mask, &map->affinity_mask)) {
965 CPU_ZERO(&rec->affinity_mask);
966 CPU_OR(&rec->affinity_mask, &rec->affinity_mask, &map->affinity_mask);
967 sched_setaffinity(0, sizeof(rec->affinity_mask), &rec->affinity_mask);
968 }
969}
970
Alexey Budankov5d7f4112019-03-18 20:43:35 +0300971static size_t process_comp_header(void *record, size_t increment)
972{
Jiri Olsa72932372019-08-28 15:57:16 +0200973 struct perf_record_compressed *event = record;
Alexey Budankov5d7f4112019-03-18 20:43:35 +0300974 size_t size = sizeof(*event);
975
976 if (increment) {
977 event->header.size += increment;
978 return increment;
979 }
980
981 event->header.type = PERF_RECORD_COMPRESSED;
982 event->header.size = size;
983
984 return size;
985}
986
987static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
988 void *src, size_t src_size)
989{
990 size_t compressed;
Jiri Olsa72932372019-08-28 15:57:16 +0200991 size_t max_record_size = PERF_SAMPLE_MAX_SIZE - sizeof(struct perf_record_compressed) - 1;
Alexey Budankov5d7f4112019-03-18 20:43:35 +0300992
993 compressed = zstd_compress_stream_to_records(&session->zstd_data, dst, dst_size, src, src_size,
994 max_record_size, process_comp_header);
995
996 session->bytes_transferred += src_size;
997 session->bytes_compressed += compressed;
998
999 return compressed;
1000}
1001
/*
 * Drain the mmap'ed ring buffers of @evlist into the output: the regular
 * buffers when @overwrite is false, the overwritable (backward) ones
 * otherwise.  Data goes either straight to the file or through AIO, and
 * any AUX area data is flushed as well.  When @synch is set, each map's
 * flush threshold is temporarily forced to 1 so everything pending is
 * pushed out.  A FINISHED_ROUND marker is emitted if anything was written.
 *
 * Returns 0 on success, -1 on failure.
 */
static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist,
				    bool overwrite, bool synch)
{
	u64 bytes_written = rec->bytes_written;
	int i;
	int rc = 0;
	struct mmap *maps;
	int trace_fd = rec->data.file.fd;
	off_t off = 0;

	if (!evlist)
		return 0;

	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
	if (!maps)
		return 0;

	/* Backward buffers are only readable once data collection is paused. */
	if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
		return 0;

	/* AIO writes do not advance the fd position; track it explicitly. */
	if (record__aio_enabled(rec))
		off = record__aio_get_pos(trace_fd);

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		u64 flush = 0;
		struct mmap *map = &maps[i];

		if (map->core.base) {
			record__adjust_affinity(rec, map);
			if (synch) {
				/* Force everything out; old threshold restored below. */
				flush = map->core.flush;
				map->core.flush = 1;
			}
			if (!record__aio_enabled(rec)) {
				if (perf_mmap__push(map, rec, record__pushfn) < 0) {
					if (synch)
						map->core.flush = flush;
					rc = -1;
					goto out;
				}
			} else {
				if (record__aio_push(rec, map, &off) < 0) {
					/* Sync the fd position back before bailing out. */
					record__aio_set_pos(trace_fd, off);
					if (synch)
						map->core.flush = flush;
					rc = -1;
					goto out;
				}
			}
			if (synch)
				map->core.flush = flush;
		}

		if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
		    !rec->opts.auxtrace_sample_mode &&
		    record__auxtrace_mmap_read(rec, map) != 0) {
			rc = -1;
			goto out;
		}
	}

	if (record__aio_enabled(rec))
		record__aio_set_pos(trace_fd, off);

	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, NULL, &finished_round_event, sizeof(finished_round_event));

	/* Allow the kernel to refill the backward buffers. */
	if (overwrite)
		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
out:
	return rc;
}
1078
Alexey Budankov470530b2019-03-18 20:40:26 +03001079static int record__mmap_read_all(struct record *rec, bool synch)
Wang Nancb216862016-06-27 10:24:04 +00001080{
1081 int err;
1082
Alexey Budankov470530b2019-03-18 20:40:26 +03001083 err = record__mmap_read_evlist(rec, rec->evlist, false, synch);
Wang Nancb216862016-06-27 10:24:04 +00001084 if (err)
1085 return err;
1086
Alexey Budankov470530b2019-03-18 20:40:26 +03001087 return record__mmap_read_evlist(rec, rec->evlist, true, synch);
Wang Nancb216862016-06-27 10:24:04 +00001088}
1089
/*
 * Initialize the perf.data header feature bits for this session: start
 * with everything set, then clear the features that do not apply given
 * the record options.
 */
static void record__init_features(struct record *rec)
{
	struct perf_session *session = rec->session;
	int feat;

	/* Enable every known header feature by default... */
	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	/* ...then drop the ones this session does not use. */
	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&rec->evlist->core.entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);

	if (!rec->opts.full_auxtrace)
		perf_header__clear_feat(&session->header, HEADER_AUXTRACE);

	if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
		perf_header__clear_feat(&session->header, HEADER_CLOCKID);

	/* Directory output is negotiated elsewhere; off by default here. */
	perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
	if (!record__comp_enabled(rec))
		perf_header__clear_feat(&session->header, HEADER_COMPRESSED);

	/* HEADER_STAT is only for 'perf stat record'. */
	perf_header__clear_feat(&session->header, HEADER_STAT);
}
1119
Wang Nane1ab48b2016-02-26 09:32:10 +00001120static void
1121record__finish_output(struct record *rec)
1122{
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001123 struct perf_data *data = &rec->data;
1124 int fd = perf_data__fd(data);
Wang Nane1ab48b2016-02-26 09:32:10 +00001125
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001126 if (data->is_pipe)
Wang Nane1ab48b2016-02-26 09:32:10 +00001127 return;
1128
1129 rec->session->header.data_size += rec->bytes_written;
Jiri Olsa45112e82019-02-21 10:41:29 +01001130 data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR);
Wang Nane1ab48b2016-02-26 09:32:10 +00001131
1132 if (!rec->no_buildid) {
1133 process_buildids(rec);
1134
1135 if (rec->buildid_all)
1136 dsos__hit_all(rec->session);
1137 }
1138 perf_session__write_header(rec->session, rec->evlist, fd, true);
1139
1140 return;
1141}
1142
Wang Nan4ea648a2016-07-14 08:34:47 +00001143static int record__synthesize_workload(struct record *rec, bool tail)
Wang Nanbe7b0c92016-04-20 18:59:54 +00001144{
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -03001145 int err;
Jiri Olsa9749b902019-07-21 13:23:50 +02001146 struct perf_thread_map *thread_map;
Wang Nanbe7b0c92016-04-20 18:59:54 +00001147
Wang Nan4ea648a2016-07-14 08:34:47 +00001148 if (rec->opts.tail_synthesize != tail)
1149 return 0;
1150
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -03001151 thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
1152 if (thread_map == NULL)
1153 return -1;
1154
1155 err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
Wang Nanbe7b0c92016-04-20 18:59:54 +00001156 process_synthesized_event,
1157 &rec->session->machines.host,
Mark Drayton3fcb10e2018-12-04 12:34:20 -08001158 rec->opts.sample_address);
Jiri Olsa7836e522019-07-21 13:24:20 +02001159 perf_thread_map__put(thread_map);
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -03001160 return err;
Wang Nanbe7b0c92016-04-20 18:59:54 +00001161}
1162
Wang Nan4ea648a2016-07-14 08:34:47 +00001163static int record__synthesize(struct record *rec, bool tail);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001164
/*
 * Rotate the output file: flush everything, finalize the current
 * perf.data, and (unless called at exit) switch to a new timestamped
 * file and re-synthesize tracking events into it.  With
 * --switch-output=N file rotation, the oldest of the N kept files is
 * removed.
 *
 * Returns the new output fd (>= 0) or a negative error code.
 */
static int
record__switch_output(struct record *rec, bool at_exit)
{
	struct perf_data *data = &rec->data;
	int fd, err;
	char *new_filename;

	/* Same Size: "2015122520103046"*/
	char timestamp[] = "InvalidTimestamp";

	/* Wait for in-flight AIO writes before touching the file. */
	record__aio_mmap_read_sync(rec);

	/* Emit the tail tracking events into the file being closed. */
	record__synthesize(rec, true);
	if (target__none(&rec->opts.target))
		record__synthesize_workload(rec, true);

	rec->samples = 0;
	record__finish_output(rec);
	err = fetch_current_timestamp(timestamp, sizeof(timestamp));
	if (err) {
		pr_err("Failed to get current timestamp\n");
		return -EINVAL;
	}

	fd = perf_data__switch(data, timestamp,
				    rec->session->header.data_offset,
				    at_exit, &new_filename);
	if (fd >= 0 && !at_exit) {
		/* Fresh file: restart the data accounting. */
		rec->bytes_written = 0;
		rec->session->header.data_size = 0;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
			data->path, timestamp);

	/* Keep at most num_files around: recycle the slot of the oldest. */
	if (rec->switch_output.num_files) {
		int n = rec->switch_output.cur_file + 1;

		if (n >= rec->switch_output.num_files)
			n = 0;
		rec->switch_output.cur_file = n;
		if (rec->switch_output.filenames[n]) {
			remove(rec->switch_output.filenames[n]);
			zfree(&rec->switch_output.filenames[n]);
		}
		rec->switch_output.filenames[n] = new_filename;
	} else {
		free(new_filename);
	}

	/* Output tracking events */
	if (!at_exit) {
		record__synthesize(rec, false);

		/*
		 * In 'perf record --switch-output' without -a,
		 * record__synthesize() in record__switch_output() won't
		 * generate tracking events because there's no thread_map
		 * in evlist. Which causes newly created perf.data doesn't
		 * contain map and comm information.
		 * Create a fake thread_map and directly call
		 * perf_event__synthesize_thread_map() for those events.
		 */
		if (target__none(&rec->opts.target))
			record__synthesize_workload(rec, false);
	}
	return fd;
}
1234
/* errno of a failed workload exec, delivered via the SIGUSR1 sigqueue value. */
static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	/* Stash the child's errno and make the main loop terminate. */
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}
1250
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001251static void snapshot_sig_handler(int sig);
Jiri Olsabfacbe32017-01-09 10:52:00 +01001252static void alarm_sig_handler(int sig);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001253
Wang Nanee667f92016-06-27 10:24:05 +00001254static const struct perf_event_mmap_page *
Jiri Olsa63503db2019-07-21 13:23:52 +02001255perf_evlist__pick_pc(struct evlist *evlist)
Wang Nanee667f92016-06-27 10:24:05 +00001256{
Wang Nanb2cb6152016-07-14 08:34:39 +00001257 if (evlist) {
Jiri Olsa547740f2019-07-27 22:07:44 +02001258 if (evlist->mmap && evlist->mmap[0].core.base)
1259 return evlist->mmap[0].core.base;
1260 if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].core.base)
1261 return evlist->overwrite_mmap[0].core.base;
Wang Nanb2cb6152016-07-14 08:34:39 +00001262 }
Wang Nanee667f92016-06-27 10:24:05 +00001263 return NULL;
1264}
1265
Wang Nanc45628b2016-05-24 02:28:59 +00001266static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
1267{
Wang Nanee667f92016-06-27 10:24:05 +00001268 const struct perf_event_mmap_page *pc;
1269
1270 pc = perf_evlist__pick_pc(rec->evlist);
1271 if (pc)
1272 return pc;
Wang Nanc45628b2016-05-24 02:28:59 +00001273 return NULL;
1274}
1275
/*
 * Synthesize all side-band events the output needs: on pipe output the
 * attrs/features/tracing data (which live in the file header otherwise),
 * then time conversion, id index (auxtrace sampling), auxtrace info,
 * kernel and module maps, guest machines, extra attrs, thread and cpu
 * maps, bpf events and finally the already-running threads of the target.
 *
 * With tail_synthesize configured the work happens only on the @tail ==
 * true call (at the end of the record), otherwise only up front.
 *
 * Returns 0 on success, negative error otherwise.
 */
static int record__synthesize(struct record *rec, bool tail)
{
	struct perf_session *session = rec->session;
	struct machine *machine = &session->machines.host;
	struct perf_data *data = &rec->data;
	struct record_opts *opts = &rec->opts;
	struct perf_tool *tool = &rec->tool;
	int fd = perf_data__fd(data);
	int err = 0;

	if (rec->opts.tail_synthesize != tail)
		return 0;

	if (data->is_pipe) {
		/*
		 * We need to synthesize events first, because some
		 * features works on top of them (on report side).
		 */
		err = perf_event__synthesize_attrs(tool, rec->evlist,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out;
		}

		err = perf_event__synthesize_features(tool, session, rec->evlist,
						      process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize features.\n");
			/* NOTE(review): direct return while sibling paths use "goto out" — same effect, inconsistent style */
			return err;
		}

		if (have_tracepoints(&rec->evlist->core.entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so its not really
			 * an error, just that we don't need to
			 * synthesize anything. We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out;
			}
			rec->bytes_written += err;
		}
	}

	err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
					  process_synthesized_event, machine);
	if (err)
		goto out;

	/* Synthesize id_index before auxtrace_info */
	if (rec->opts.auxtrace_sample_mode) {
		err = perf_event__synthesize_id_index(tool,
						      process_synthesized_event,
						      session->evlist, machine);
		if (err)
			goto out;
	}

	if (rec->opts.full_auxtrace) {
		err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
					session, process_synthesized_event);
		if (err)
			goto out;
	}

	if (!perf_evlist__exclude_kernel(rec->evlist)) {
		/* Failures here only skew symbol resolution; warn, don't abort. */
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine);
		WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
				   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
				   "Check /proc/kallsyms permission or run as root.\n");

		err = perf_event__synthesize_modules(tool, process_synthesized_event,
						     machine);
		WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
				   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
				   "Check /proc/modules permission or run as root.\n");
	}

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	err = perf_event__synthesize_extra_attr(&rec->tool,
						rec->evlist,
						process_synthesized_event,
						data->is_pipe);
	if (err)
		goto out;

	err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->core.threads,
						 process_synthesized_event,
						NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize thread map.\n");
		return err;
	}

	err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.cpus,
					     process_synthesized_event, NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize cpu map.\n");
		return err;
	}

	err = perf_event__synthesize_bpf_events(session, process_synthesized_event,
						machine, opts);
	if (err < 0)
		pr_warning("Couldn't synthesize bpf events.\n");

	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->core.threads,
					    process_synthesized_event, opts->sample_address,
					    1);
out:
	return err;
}
1400
/*
 * Main record loop: install signal handlers, create the perf session and
 * write the perf.data header, start the workload (if any), then repeatedly
 * drain the mmap ring buffers — handling AUX snapshot and output-switch
 * triggers — until done, finally synthesizing trailing events, reaping the
 * workload and closing out the output file.
 */
static int __cmd_record(struct record *rec, int argc, const char **argv)
{
	int err;
	int status = 0;
	unsigned long waking = 0;
	const bool forks = argc > 0;
	struct perf_tool *tool = &rec->tool;
	struct record_opts *opts = &rec->opts;
	struct perf_data *data = &rec->data;
	struct perf_session *session;
	bool disabled = false, draining = false;
	struct evlist *sb_evlist = NULL;
	int fd;
	float ratio = 0;

	/* Install cleanup and termination handlers before anything can fail. */
	atexit(record__sig_exit);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGTERM, sig_handler);
	signal(SIGSEGV, sigsegv_handler);

	if (rec->opts.record_namespaces)
		tool->namespace_events = true;

	/*
	 * SIGUSR2 drives AUX-area snapshots and/or output file switching;
	 * if neither feature is enabled the signal is ignored.
	 */
	if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
		signal(SIGUSR2, snapshot_sig_handler);
		if (rec->opts.auxtrace_snapshot_mode)
			trigger_on(&auxtrace_snapshot_trigger);
		if (rec->switch_output.enabled)
			trigger_on(&switch_output_trigger);
	} else {
		signal(SIGUSR2, SIG_IGN);
	}

	session = perf_session__new(data, false, tool);
	if (IS_ERR(session)) {
		pr_err("Perf session creation failed.\n");
		return PTR_ERR(session);
	}

	fd = perf_data__fd(data);
	rec->session = session;

	/*
	 * NOTE(review): the early "return -1" paths below leak 'session'
	 * (no perf_session__delete); consider routing them through
	 * out_delete_session — verify the sideband-thread teardown there is
	 * safe that early before changing.
	 */
	if (zstd_init(&session->zstd_data, rec->opts.comp_level) < 0) {
		pr_err("Compression initialization failed.\n");
		return -1;
	}

	session->header.env.comp_type = PERF_COMP_ZSTD;
	session->header.env.comp_level = rec->opts.comp_level;

	if (rec->opts.kcore &&
	    !record__kcore_readable(&session->machines.host)) {
		pr_err("ERROR: kcore is not readable.\n");
		return -1;
	}

	record__init_features(rec);

	if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
		session->header.env.clockid_res_ns = rec->opts.clockid_res_ns;

	/*
	 * Fork the workload now so its pid exists for event setup; it is
	 * actually kicked off later via perf_evlist__start_workload().
	 */
	if (forks) {
		err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
						    argv, data->is_pipe,
						    workload_exec_failed_signal);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			status = err;
			goto out_delete_session;
		}
	}

	/*
	 * If we have just single event and are sending data
	 * through pipe, we need to force the ids allocation,
	 * because we synthesize event name through the pipe
	 * and need the id for that.
	 */
	if (data->is_pipe && rec->evlist->core.nr_entries == 1)
		rec->opts.sample_id = true;

	if (record__open(rec) != 0) {
		err = -1;
		goto out_child;
	}
	session->header.env.comp_mmap_len = session->evlist->core.mmap_len;

	if (rec->opts.kcore) {
		err = record__kcore_copy(&session->machines.host, data);
		if (err) {
			pr_err("ERROR: Failed to copy kcore\n");
			goto out_child;
		}
	}

	err = bpf__apply_obj_config();
	if (err) {
		char errbuf[BUFSIZ];

		bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
		pr_err("ERROR: Apply config to BPF failed: %s\n",
			 errbuf);
		goto out_child;
	}

	/*
	 * Normally perf_session__new would do this, but it doesn't have the
	 * evlist.
	 */
	if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
		pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
		rec->tool.ordered_events = false;
	}

	if (!rec->evlist->nr_groups)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

	/* Pipe output gets a streaming header; files get the regular one. */
	if (data->is_pipe) {
		err = perf_header__write_pipe(fd);
		if (err < 0)
			goto out_child;
	} else {
		err = perf_session__write_header(session, rec->evlist, fd, false);
		if (err < 0)
			goto out_child;
	}

	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		err = -1;
		goto out_child;
	}

	/* BPF sideband events are collected on a separate thread. */
	if (!opts->no_bpf_event)
		bpf_event__add_sb_event(&sb_evlist, &session->header.env);

	if (perf_evlist__start_sb_thread(sb_evlist, &rec->opts.target)) {
		pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
		opts->no_bpf_event = true;
	}

	err = record__synthesize(rec, false);
	if (err < 0)
		goto out_child;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_child;
		}
	}

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!target__none(&opts->target) && !opts->initial_delay)
		evlist__enable(rec->evlist);

	/*
	 * Let the child rip
	 */
	if (forks) {
		struct machine *machine = &session->machines.host;
		union perf_event *event;
		pid_t tgid;

		event = malloc(sizeof(event->comm) + machine->id_hdr_size);
		if (event == NULL) {
			err = -ENOMEM;
			goto out_child;
		}

		/*
		 * Some H/W events are generated before COMM event
		 * which is emitted during exec(), so perf script
		 * cannot see a correct process name for those events.
		 * Synthesize COMM event to prevent it.
		 */
		tgid = perf_event__synthesize_comm(tool, event,
						   rec->evlist->workload.pid,
						   process_synthesized_event,
						   machine);
		free(event);

		if (tgid == -1)
			goto out_child;

		event = malloc(sizeof(event->namespaces) +
			       (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
			       machine->id_hdr_size);
		if (event == NULL) {
			err = -ENOMEM;
			goto out_child;
		}

		/*
		 * Synthesize NAMESPACES event for the command specified.
		 */
		perf_event__synthesize_namespaces(tool, event,
						  rec->evlist->workload.pid,
						  tgid, process_synthesized_event,
						  machine);
		free(event);

		perf_evlist__start_workload(rec->evlist);
	}

	if (opts->initial_delay) {
		usleep(opts->initial_delay * USEC_PER_MSEC);
		evlist__enable(rec->evlist);
	}

	trigger_ready(&auxtrace_snapshot_trigger);
	trigger_ready(&switch_output_trigger);
	perf_hooks__invoke_record_start();
	/*
	 * Main capture loop: drain the mmaps, service AUX-snapshot and
	 * switch-output triggers, and poll when no new samples arrived.
	 */
	for (;;) {
		unsigned long long hits = rec->samples;

		/*
		 * rec->evlist->bkw_mmap_state is possible to be
		 * BKW_MMAP_EMPTY here: when done == true and
		 * hits != rec->samples in previous round.
		 *
		 * perf_evlist__toggle_bkw_mmap ensure we never
		 * convert BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
		 */
		if (trigger_is_hit(&switch_output_trigger) || done || draining)
			perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);

		if (record__mmap_read_all(rec, false) < 0) {
			trigger_error(&auxtrace_snapshot_trigger);
			trigger_error(&switch_output_trigger);
			err = -1;
			goto out_child;
		}

		if (auxtrace_record__snapshot_started) {
			auxtrace_record__snapshot_started = 0;
			if (!trigger_is_error(&auxtrace_snapshot_trigger))
				record__read_auxtrace_snapshot(rec, false);
			if (trigger_is_error(&auxtrace_snapshot_trigger)) {
				pr_err("AUX area tracing snapshot failed\n");
				err = -1;
				goto out_child;
			}
		}

		if (trigger_is_hit(&switch_output_trigger)) {
			/*
			 * If switch_output_trigger is hit, the data in
			 * overwritable ring buffer should have been collected,
			 * so bkw_mmap_state should be set to BKW_MMAP_EMPTY.
			 *
			 * If SIGUSR2 raise after or during record__mmap_read_all(),
			 * record__mmap_read_all() didn't collect data from
			 * overwritable ring buffer. Read again.
			 */
			if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
				continue;
			trigger_ready(&switch_output_trigger);

			/*
			 * Reenable events in overwrite ring buffer after
			 * record__mmap_read_all(): we should have collected
			 * data from it.
			 */
			perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);

			if (!quiet)
				fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
					waking);
			waking = 0;
			fd = record__switch_output(rec, false);
			if (fd < 0) {
				pr_err("Failed to switch to new file\n");
				trigger_error(&switch_output_trigger);
				err = fd;
				goto out_child;
			}

			/* re-arm the alarm */
			if (rec->switch_output.time)
				alarm(rec->switch_output.time);
		}

		/* No new samples this round: block until the kernel wakes us. */
		if (hits == rec->samples) {
			if (done || draining)
				break;
			err = evlist__poll(rec->evlist, -1);
			/*
			 * Propagate error, only if there's any. Ignore positive
			 * number of returned events and interrupt error.
			 */
			if (err > 0 || (err < 0 && errno == EINTR))
				err = 0;
			waking++;

			if (evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
				draining = true;
		}

		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
		if (done && !disabled && !target__none(&opts->target)) {
			trigger_off(&auxtrace_snapshot_trigger);
			evlist__disable(rec->evlist);
			disabled = true;
		}
	}

	trigger_off(&auxtrace_snapshot_trigger);
	trigger_off(&switch_output_trigger);

	if (opts->auxtrace_snapshot_on_exit)
		record__auxtrace_snapshot_exit(rec);

	if (forks && workload_exec_errno) {
		char msg[STRERR_BUFSIZE];
		const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
		pr_err("Workload failed: %s\n", emsg);
		err = -1;
		goto out_child;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

	if (target__none(&rec->opts.target))
		record__synthesize_workload(rec, true);

out_child:
	/* Final flush: drain remaining mmap data and in-flight AIO writes. */
	record__mmap_read_all(rec, true);
	record__aio_mmap_read_sync(rec);

	if (rec->session->bytes_transferred && rec->session->bytes_compressed) {
		ratio = (float)rec->session->bytes_transferred/(float)rec->session->bytes_compressed;
		/* +0.5 rounds the stored ratio to the nearest integer. */
		session->header.env.comp_ratio = ratio + 0.5;
	}

	/* Reap the workload and translate its exit state into our status. */
	if (forks) {
		int exit_status;

		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&exit_status);

		if (err < 0)
			status = err;
		else if (WIFEXITED(exit_status))
			status = WEXITSTATUS(exit_status);
		else if (WIFSIGNALED(exit_status))
			signr = WTERMSIG(exit_status);
	} else
		status = err;

	record__synthesize(rec, true);
	/* this will be recalculated during process_buildids() */
	rec->samples = 0;

	if (!err) {
		if (!rec->timestamp_filename) {
			record__finish_output(rec);
		} else {
			fd = record__switch_output(rec, true);
			if (fd < 0) {
				status = fd;
				goto out_delete_session;
			}
		}
	}

	perf_hooks__invoke_record_end();

	if (!err && !quiet) {
		char samples[128];
		const char *postfix = rec->timestamp_filename ?
					".<timestamp>" : "";

		if (rec->samples && !rec->opts.full_auxtrace)
			scnprintf(samples, sizeof(samples),
				  " (%" PRIu64 " samples)", rec->samples);
		else
			samples[0] = '\0';

		fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s",
			perf_data__size(data) / 1024.0 / 1024.0,
			data->path, postfix, samples);
		if (ratio) {
			fprintf(stderr, ", compressed (original %.3f MB, ratio is %.3f)",
				rec->session->bytes_transferred / 1024.0 / 1024.0,
				ratio);
		}
		fprintf(stderr, " ]\n");
	}

out_delete_session:
	zstd_fini(&session->zstd_data);
	perf_session__delete(session);

	if (!opts->no_bpf_event)
		perf_evlist__stop_sb_thread(sb_evlist);
	return status;
}
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001817
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001818static void callchain_debug(struct callchain_param *callchain)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001819{
Kan Liangaad2b212015-01-05 13:23:04 -05001820 static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
Jiri Olsaa601fdf2014-02-03 12:44:43 +01001821
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001822 pr_debug("callchain: type %s\n", str[callchain->record_mode]);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001823
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001824 if (callchain->record_mode == CALLCHAIN_DWARF)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001825 pr_debug("callchain: stack dump size %d\n",
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001826 callchain->dump_size);
1827}
1828
1829int record_opts__parse_callchain(struct record_opts *record,
1830 struct callchain_param *callchain,
1831 const char *arg, bool unset)
1832{
1833 int ret;
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001834 callchain->enabled = !unset;
1835
1836 /* --no-call-graph */
1837 if (unset) {
1838 callchain->record_mode = CALLCHAIN_NONE;
1839 pr_debug("callchain: disabled\n");
1840 return 0;
1841 }
1842
1843 ret = parse_callchain_record_opt(arg, callchain);
1844 if (!ret) {
1845 /* Enable data address sampling for DWARF unwind. */
1846 if (callchain->record_mode == CALLCHAIN_DWARF)
1847 record->sample_address = true;
1848 callchain_debug(callchain);
1849 }
1850
1851 return ret;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001852}
1853
/*
 * Option callback for --call-graph: opt->value points at the record_opts,
 * and the result is parsed into the global callchain_param.
 */
int record_parse_callchain_opt(const struct option *opt,
			       const char *arg,
			       int unset)
{
	return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
}
1860
Kan Liangc421e802015-07-29 05:42:12 -04001861int record_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001862 const char *arg __maybe_unused,
1863 int unset __maybe_unused)
1864{
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001865 struct callchain_param *callchain = opt->value;
Kan Liangc421e802015-07-29 05:42:12 -04001866
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001867 callchain->enabled = true;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001868
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001869 if (callchain->record_mode == CALLCHAIN_NONE)
1870 callchain->record_mode = CALLCHAIN_FP;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001871
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001872 callchain_debug(callchain);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001873 return 0;
1874}
1875
Jiri Olsaeb853e82014-02-03 12:44:42 +01001876static int perf_record_config(const char *var, const char *value, void *cb)
1877{
Namhyung Kim7a29c082015-12-15 10:49:56 +09001878 struct record *rec = cb;
1879
1880 if (!strcmp(var, "record.build-id")) {
1881 if (!strcmp(value, "cache"))
1882 rec->no_buildid_cache = false;
1883 else if (!strcmp(value, "no-cache"))
1884 rec->no_buildid_cache = true;
1885 else if (!strcmp(value, "skip"))
1886 rec->no_buildid = true;
1887 else
1888 return -1;
1889 return 0;
1890 }
Yisheng Xiecff17202018-03-12 19:25:57 +08001891 if (!strcmp(var, "record.call-graph")) {
1892 var = "call-graph.record-mode";
1893 return perf_default_config(var, value, cb);
1894 }
Alexey Budankov93f20c02018-11-06 12:07:19 +03001895#ifdef HAVE_AIO_SUPPORT
1896 if (!strcmp(var, "record.aio")) {
1897 rec->opts.nr_cblocks = strtol(value, NULL, 0);
1898 if (!rec->opts.nr_cblocks)
1899 rec->opts.nr_cblocks = nr_cblocks_default;
1900 }
1901#endif
Jiri Olsaeb853e82014-02-03 12:44:42 +01001902
Yisheng Xiecff17202018-03-12 19:25:57 +08001903 return 0;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001904}
1905
/* Maps a user-supplied clock name to its clockid number (see parse_clockid()). */
struct clockid_map {
	const char *name;
	int clockid;
};

/* Table-entry helper pairing a name with its clockid. */
#define CLOCKID_MAP(n, c)	\
	{ .name = n, .clockid = (c), }

/* Sentinel terminating the clockids[] table. */
#define CLOCKID_END	{ .name = NULL, }


/*
 * Add the missing ones, we need to build on many distros...
 */
#ifndef CLOCK_MONOTONIC_RAW
#define CLOCK_MONOTONIC_RAW 4
#endif
#ifndef CLOCK_BOOTTIME
#define CLOCK_BOOTTIME 7
#endif
#ifndef CLOCK_TAI
#define CLOCK_TAI 11
#endif

static const struct clockid_map clockids[] = {
	/* available for all events, NMI safe */
	CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
	CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),

	/* available for some events */
	CLOCKID_MAP("realtime", CLOCK_REALTIME),
	CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
	CLOCKID_MAP("tai", CLOCK_TAI),

	/* available for the lazy */
	CLOCKID_MAP("mono", CLOCK_MONOTONIC),
	CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
	CLOCKID_MAP("real", CLOCK_REALTIME),
	CLOCKID_MAP("boot", CLOCK_BOOTTIME),

	CLOCKID_END,
};
1948
Alexey Budankovcf790512018-10-09 17:36:24 +03001949static int get_clockid_res(clockid_t clk_id, u64 *res_ns)
1950{
1951 struct timespec res;
1952
1953 *res_ns = 0;
1954 if (!clock_getres(clk_id, &res))
1955 *res_ns = res.tv_nsec + res.tv_sec * NSEC_PER_SEC;
1956 else
1957 pr_warning("WARNING: Failed to determine specified clock resolution.\n");
1958
1959 return 0;
1960}
1961
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001962static int parse_clockid(const struct option *opt, const char *str, int unset)
1963{
1964 struct record_opts *opts = (struct record_opts *)opt->value;
1965 const struct clockid_map *cm;
1966 const char *ostr = str;
1967
1968 if (unset) {
1969 opts->use_clockid = 0;
1970 return 0;
1971 }
1972
1973 /* no arg passed */
1974 if (!str)
1975 return 0;
1976
1977 /* no setting it twice */
1978 if (opts->use_clockid)
1979 return -1;
1980
1981 opts->use_clockid = true;
1982
1983 /* if its a number, we're done */
1984 if (sscanf(str, "%d", &opts->clockid) == 1)
Alexey Budankovcf790512018-10-09 17:36:24 +03001985 return get_clockid_res(opts->clockid, &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001986
1987 /* allow a "CLOCK_" prefix to the name */
1988 if (!strncasecmp(str, "CLOCK_", 6))
1989 str += 6;
1990
1991 for (cm = clockids; cm->name; cm++) {
1992 if (!strcasecmp(str, cm->name)) {
1993 opts->clockid = cm->clockid;
Alexey Budankovcf790512018-10-09 17:36:24 +03001994 return get_clockid_res(opts->clockid,
1995 &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001996 }
1997 }
1998
1999 opts->use_clockid = false;
2000 ui__warning("unknown clockid %s, check man page\n", ostr);
2001 return -1;
2002}
2003
Alexey Budankovf4fe11b2019-01-22 20:52:03 +03002004static int record__parse_affinity(const struct option *opt, const char *str, int unset)
2005{
2006 struct record_opts *opts = (struct record_opts *)opt->value;
2007
2008 if (unset || !str)
2009 return 0;
2010
2011 if (!strcasecmp(str, "node"))
2012 opts->affinity = PERF_AFFINITY_NODE;
2013 else if (!strcasecmp(str, "cpu"))
2014 opts->affinity = PERF_AFFINITY_CPU;
2015
2016 return 0;
2017}
2018
Jiwei Sun6d575812019-10-22 16:09:01 +08002019static int parse_output_max_size(const struct option *opt,
2020 const char *str, int unset)
2021{
2022 unsigned long *s = (unsigned long *)opt->value;
2023 static struct parse_tag tags_size[] = {
2024 { .tag = 'B', .mult = 1 },
2025 { .tag = 'K', .mult = 1 << 10 },
2026 { .tag = 'M', .mult = 1 << 20 },
2027 { .tag = 'G', .mult = 1 << 30 },
2028 { .tag = 0 },
2029 };
2030 unsigned long val;
2031
2032 if (unset) {
2033 *s = 0;
2034 return 0;
2035 }
2036
2037 val = parse_tag_value(str, tags_size);
2038 if (val != (unsigned long) -1) {
2039 *s = val;
2040 return 0;
2041 }
2042
2043 return -1;
2044}
2045
Adrian Huntere9db1312015-04-09 18:53:46 +03002046static int record__parse_mmap_pages(const struct option *opt,
2047 const char *str,
2048 int unset __maybe_unused)
2049{
2050 struct record_opts *opts = opt->value;
2051 char *s, *p;
2052 unsigned int mmap_pages;
2053 int ret;
2054
2055 if (!str)
2056 return -EINVAL;
2057
2058 s = strdup(str);
2059 if (!s)
2060 return -ENOMEM;
2061
2062 p = strchr(s, ',');
2063 if (p)
2064 *p = '\0';
2065
2066 if (*s) {
2067 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
2068 if (ret)
2069 goto out_free;
2070 opts->mmap_pages = mmap_pages;
2071 }
2072
2073 if (!p) {
2074 ret = 0;
2075 goto out_free;
2076 }
2077
2078 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
2079 if (ret)
2080 goto out_free;
2081
2082 opts->auxtrace_mmap_pages = mmap_pages;
2083
2084out_free:
2085 free(s);
2086 return ret;
2087}
2088
Jiri Olsa0c582442017-01-09 10:51:59 +01002089static void switch_output_size_warn(struct record *rec)
2090{
Jiri Olsa9521b5f2019-07-28 12:45:35 +02002091 u64 wakeup_size = evlist__mmap_size(rec->opts.mmap_pages);
Jiri Olsa0c582442017-01-09 10:51:59 +01002092 struct switch_output *s = &rec->switch_output;
2093
2094 wakeup_size /= 2;
2095
2096 if (s->size < wakeup_size) {
2097 char buf[100];
2098
2099 unit_number__scnprintf(buf, sizeof(buf), wakeup_size);
2100 pr_warning("WARNING: switch-output data size lower than "
2101 "wakeup kernel buffer size (%s) "
2102 "expect bigger perf.data sizes\n", buf);
2103 }
2104}
2105
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002106static int switch_output_setup(struct record *rec)
2107{
2108 struct switch_output *s = &rec->switch_output;
Jiri Olsadc0c6122017-01-09 10:51:58 +01002109 static struct parse_tag tags_size[] = {
2110 { .tag = 'B', .mult = 1 },
2111 { .tag = 'K', .mult = 1 << 10 },
2112 { .tag = 'M', .mult = 1 << 20 },
2113 { .tag = 'G', .mult = 1 << 30 },
2114 { .tag = 0 },
2115 };
Jiri Olsabfacbe32017-01-09 10:52:00 +01002116 static struct parse_tag tags_time[] = {
2117 { .tag = 's', .mult = 1 },
2118 { .tag = 'm', .mult = 60 },
2119 { .tag = 'h', .mult = 60*60 },
2120 { .tag = 'd', .mult = 60*60*24 },
2121 { .tag = 0 },
2122 };
Jiri Olsadc0c6122017-01-09 10:51:58 +01002123 unsigned long val;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002124
2125 if (!s->set)
2126 return 0;
2127
2128 if (!strcmp(s->str, "signal")) {
2129 s->signal = true;
2130 pr_debug("switch-output with SIGUSR2 signal\n");
Jiri Olsadc0c6122017-01-09 10:51:58 +01002131 goto enabled;
2132 }
2133
2134 val = parse_tag_value(s->str, tags_size);
2135 if (val != (unsigned long) -1) {
2136 s->size = val;
2137 pr_debug("switch-output with %s size threshold\n", s->str);
2138 goto enabled;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002139 }
2140
Jiri Olsabfacbe32017-01-09 10:52:00 +01002141 val = parse_tag_value(s->str, tags_time);
2142 if (val != (unsigned long) -1) {
2143 s->time = val;
2144 pr_debug("switch-output with %s time threshold (%lu seconds)\n",
2145 s->str, s->time);
2146 goto enabled;
2147 }
2148
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002149 return -1;
Jiri Olsadc0c6122017-01-09 10:51:58 +01002150
2151enabled:
2152 rec->timestamp_filename = true;
2153 s->enabled = true;
Jiri Olsa0c582442017-01-09 10:51:59 +01002154
2155 if (s->size && !rec->opts.no_buffering)
2156 switch_output_size_warn(rec);
2157
Jiri Olsadc0c6122017-01-09 10:51:58 +01002158 return 0;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002159}
2160
/* Usage strings for "perf record -h"; exported to builtin-script via record_usage. */
static const char * const __record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};
const char * const *record_usage = __record_usage;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002167
Arnaldo Carvalho de Melo6e0a9b32019-11-14 12:15:34 -03002168static int build_id__process_mmap(struct perf_tool *tool, union perf_event *event,
2169 struct perf_sample *sample, struct machine *machine)
2170{
2171 /*
2172 * We already have the kernel maps, put in place via perf_session__create_kernel_maps()
2173 * no need to add them twice.
2174 */
2175 if (!(event->header.misc & PERF_RECORD_MISC_USER))
2176 return 0;
2177 return perf_event__process_mmap(tool, event, sample, machine);
2178}
2179
2180static int build_id__process_mmap2(struct perf_tool *tool, union perf_event *event,
2181 struct perf_sample *sample, struct machine *machine)
2182{
2183 /*
2184 * We already have the kernel maps, put in place via perf_session__create_kernel_maps()
2185 * no need to add them twice.
2186 */
2187 if (!(event->header.misc & PERF_RECORD_MISC_USER))
2188 return 0;
2189
2190 return perf_event__process_mmap2(tool, event, sample, machine);
2191}
2192
/*
 * XXX Ideally this would be local to cmd_record() and passed to a record__new
 * because we need to have access to it in record__exit, that is called
 * after cmd_record() exits, but since record_options needs to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't touch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03002203static struct record record = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002204 .opts = {
Andi Kleen8affc2b2014-07-31 14:45:04 +08002205 .sample_time = true,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002206 .mmap_pages = UINT_MAX,
2207 .user_freq = UINT_MAX,
2208 .user_interval = ULLONG_MAX,
Arnaldo Carvalho de Melo447a6012012-05-22 13:14:18 -03002209 .freq = 4000,
Namhyung Kimd1cb9fc2012-05-16 18:45:49 +09002210 .target = {
2211 .uses_mmap = true,
Adrian Hunter3aa59392013-11-15 15:52:29 +02002212 .default_per_cpu = true,
Namhyung Kimd1cb9fc2012-05-16 18:45:49 +09002213 },
Alexey Budankov470530b2019-03-18 20:40:26 +03002214 .mmap_flush = MMAP_FLUSH_DEFAULT,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002215 },
Namhyung Kime3d59112015-01-29 17:06:44 +09002216 .tool = {
2217 .sample = process_sample_event,
2218 .fork = perf_event__process_fork,
Adrian Huntercca84822015-08-19 17:29:21 +03002219 .exit = perf_event__process_exit,
Namhyung Kime3d59112015-01-29 17:06:44 +09002220 .comm = perf_event__process_comm,
Hari Bathinif3b36142017-03-08 02:11:43 +05302221 .namespaces = perf_event__process_namespaces,
Arnaldo Carvalho de Melo6e0a9b32019-11-14 12:15:34 -03002222 .mmap = build_id__process_mmap,
2223 .mmap2 = build_id__process_mmap2,
Adrian Huntercca84822015-08-19 17:29:21 +03002224 .ordered_events = true,
Namhyung Kime3d59112015-01-29 17:06:44 +09002225 },
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002226};
Frederic Weisbecker7865e812010-04-14 19:42:07 +02002227
/* Help text for --call-graph; shared CALLCHAIN_RECORD_HELP plus the record default. */
Namhyung Kim76a26542015-10-22 23:28:32 +09002228const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
2229 "\n\t\t\t\tDefault: fp";
Arnaldo Carvalho de Melo61eaa3b2012-10-01 15:20:58 -03002230
/* --dry-run: parse options, then exit before recording anything. */
Wang Nan0aab2132016-06-16 08:02:41 +00002231static bool dry_run;
2232
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002233/*
2234 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
2235 * with it and switch to use the library functions in perf_evlist that came
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03002236 * from builtin-record.c, i.e. use record_opts,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002237 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
2238 * using pipes, etc.
2239 */
Jiri Olsaefd21302017-01-03 09:19:55 +01002240static struct option __record_options[] = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002241 OPT_CALLBACK('e', "event", &record.evlist, "event",
Thomas Gleixner86847b62009-06-06 12:24:17 +02002242 "event selector. use 'perf list' to list available events",
Jiri Olsaf120f9d2011-07-14 11:25:32 +02002243 parse_events_option),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002244 OPT_CALLBACK(0, "filter", &record.evlist, "filter",
Li Zefanc171b552009-10-15 11:22:07 +08002245 "event filter", parse_filter),
Wang Nan4ba1faa2015-07-10 07:36:10 +00002246 OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
2247 NULL, "don't record events from perf itself",
2248 exclude_perf),
Namhyung Kimbea03402012-04-26 14:15:15 +09002249 OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03002250 "record events on existing process id"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002251 OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03002252 "record events on existing thread id"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002253 OPT_INTEGER('r', "realtime", &record.realtime_prio,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002254 "collect data with this RT SCHED_FIFO priority"),
Arnaldo Carvalho de Melo509051e2014-01-14 17:52:14 -03002255 OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
Kirill Smelkovacac03f2011-01-12 17:59:36 +03002256 "collect data without buffering"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002257 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
Frederic Weisbeckerdaac07b2009-08-13 10:27:19 +02002258 "collect raw sample records from all opened counters"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002259 OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002260 "system-wide collection from all CPUs"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002261 OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
Stephane Eranianc45c6ea2010-05-28 12:00:01 +02002262 "list of cpus to monitor"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002263 OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
Jiri Olsa2d4f2792019-02-21 10:41:30 +01002264 OPT_STRING('o', "output", &record.data.path, "file",
Ingo Molnarabaff322009-06-02 22:59:57 +02002265 "output file name"),
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02002266 OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
2267 &record.opts.no_inherit_set,
2268 "child tasks do not inherit counters"),
Wang Nan4ea648a2016-07-14 08:34:47 +00002269 OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
2270 "synthesize non-sample events at the end of output"),
Wang Nan626a6b72016-07-14 08:34:45 +00002271 OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
Song Liu71184c62019-03-11 22:30:37 -07002272 OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "record bpf events"),
Arnaldo Carvalho de Melob09c2362018-03-01 14:52:50 -03002273 OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
2274 "Fail if the specified frequency can't be used"),
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03002275 OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
2276 "profile at this frequency",
2277 record__parse_freq),
Adrian Huntere9db1312015-04-09 18:53:46 +03002278 OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
2279 "number of mmap data pages and AUX area tracing mmap pages",
2280 record__parse_mmap_pages),
Alexey Budankov470530b2019-03-18 20:40:26 +03002281 OPT_CALLBACK(0, "mmap-flush", &record.opts, "number",
2282 "Minimal number of bytes that is extracted from mmap data pages (default: 1)",
2283 record__mmap_flush_parse),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002284 OPT_BOOLEAN(0, "group", &record.opts.group,
Lin Ming43bece72011-08-17 18:42:07 +08002285 "put the counters into a counter group"),
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03002286 OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002287 NULL, "enables call-graph recording" ,
2288 &record_callchain_opt),
2289 OPT_CALLBACK(0, "call-graph", &record.opts,
Namhyung Kim76a26542015-10-22 23:28:32 +09002290 "record_mode[,record_size]", record_callchain_help,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002291 &record_parse_callchain_opt),
Ian Munsiec0555642010-04-13 18:37:33 +10002292 OPT_INCR('v', "verbose", &verbose,
Ingo Molnar3da297a2009-06-07 17:39:02 +02002293 "be more verbose (show counter open errors, etc)"),
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02002294 OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002295 OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02002296 "per thread counts"),
Peter Zijlstra56100322015-06-10 16:48:50 +02002297 OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
Kan Liang3b0a5da2017-08-29 13:11:08 -04002298 OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
2299 "Record the sample physical addresses"),
Jiri Olsab6f35ed2016-08-01 20:02:35 +02002300 OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
Adrian Hunter3abebc52015-07-06 14:51:01 +03002301 OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
2302 &record.opts.sample_time_set,
2303 "Record the sample timestamps"),
Jiri Olsaf290aa12018-02-01 09:38:11 +01002304 OPT_BOOLEAN_SET('P', "period", &record.opts.period, &record.opts.period_set,
2305 "Record the sample period"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002306 OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02002307 "don't sample"),
Wang Nand2db9a92016-01-25 09:56:19 +00002308 OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
2309 &record.no_buildid_cache_set,
2310 "do not update the buildid cache"),
2311 OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
2312 &record.no_buildid_set,
2313 "do not collect buildids in perf.data"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002314 OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
Stephane Eranian023695d2011-02-14 11:20:01 +02002315 "monitor event in cgroup name only",
2316 parse_cgroups),
Arnaldo Carvalho de Meloa6205a32014-01-14 17:58:12 -03002317 OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
Andi Kleen6619a532014-01-11 13:38:27 -08002318 "ms to wait before starting measurement after program start"),
Adrian Huntereeb399b2019-10-04 11:31:21 +03002319 OPT_BOOLEAN(0, "kcore", &record.opts.kcore, "copy /proc/kcore"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002320 OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
2321 "user to profile"),
Stephane Eraniana5aabda2012-03-08 23:47:45 +01002322
2323 OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
2324 "branch any", "sample any taken branches",
2325 parse_branch_stack),
2326
2327 OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
2328 "branch filter mask", "branch stack filter modes",
Roberto Agostino Vitillobdfebd82012-02-09 23:21:02 +01002329 parse_branch_stack),
Andi Kleen05484292013-01-24 16:10:29 +01002330 OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
2331 "sample by weight (on special events only)"),
Andi Kleen475eeab2013-09-20 07:40:43 -07002332 OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
2333 "sample transaction flags (special events only)"),
Adrian Hunter3aa59392013-11-15 15:52:29 +02002334 OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
2335 "use per-thread mmaps"),
Stephane Eranianbcc84ec2015-08-31 18:41:12 +02002336 OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
2337 "sample selected machine registers on interrupt,"
Kan Liangaeea9062019-05-14 13:19:32 -07002338 " use '-I?' to list register names", parse_intr_regs),
Andi Kleen84c41742017-09-05 10:00:28 -07002339 OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
2340 "sample selected machine registers on interrupt,"
Kan Liangaeea9062019-05-14 13:19:32 -07002341 " use '--user-regs=?' to list register names", parse_user_regs),
Andi Kleen85c273d2015-02-24 15:13:40 -08002342 OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
2343 "Record running/enabled time of read (:S) events"),
Peter Zijlstra814c8c32015-03-31 00:19:31 +02002344 OPT_CALLBACK('k', "clockid", &record.opts,
2345 "clockid", "clockid to use for events, see clock_gettime()",
2346 parse_clockid),
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002347 OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
2348 "opts", "AUX area tracing Snapshot Mode", ""),
Adrian Hunterc0a6de02019-11-15 14:42:16 +02002349 OPT_STRING_OPTARG(0, "aux-sample", &record.opts.auxtrace_sample_opts,
2350 "opts", "sample AUX area", ""),
Mark Drayton3fcb10e2018-12-04 12:34:20 -08002351 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
Kan Liang9d9cad72015-06-17 09:51:11 -04002352 "per thread proc mmap processing timeout in ms"),
Hari Bathinif3b36142017-03-08 02:11:43 +05302353 OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
2354 "Record namespaces events"),
Adrian Hunterb757bb02015-07-21 12:44:04 +03002355 OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
2356 "Record context switch events"),
Jiri Olsa85723882016-02-15 09:34:31 +01002357 OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
2358 "Configure all used events to run in kernel space.",
2359 PARSE_OPT_EXCLUSIVE),
2360 OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
2361 "Configure all used events to run in user space.",
2362 PARSE_OPT_EXCLUSIVE),
yuzhoujian53651b22019-05-30 14:29:22 +01002363 OPT_BOOLEAN(0, "kernel-callchains", &record.opts.kernel_callchains,
2364 "collect kernel callchains"),
2365 OPT_BOOLEAN(0, "user-callchains", &record.opts.user_callchains,
2366 "collect user callchains"),
Wang Nan71dc23262015-10-14 12:41:19 +00002367 OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
2368 "clang binary to use for compiling BPF scriptlets"),
2369 OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
2370 "options passed to clang when compiling BPF scriptlets"),
He Kuang7efe0e02015-12-14 10:39:23 +00002371 OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
2372 "file", "vmlinux pathname"),
Namhyung Kim61566812016-01-11 22:37:09 +09002373 OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
2374 "Record build-id of all DSOs regardless of hits"),
Wang Nanecfd7a92016-04-13 08:21:07 +00002375 OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
2376 "append timestamp to output filename"),
Jin Yao68588ba2017-12-08 21:13:42 +08002377 OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
2378 "Record timestamp boundary (time of first/last samples)"),
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002379 OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
Andi Kleenc38dab72019-03-14 15:49:56 -07002380 &record.switch_output.set, "signal or size[BKMG] or time[smhd]",
2381 "Switch output when receiving SIGUSR2 (signal) or cross a size or time threshold",
Jiri Olsadc0c6122017-01-09 10:51:58 +01002382 "signal"),
Andi Kleen03724b22019-03-14 15:49:55 -07002383 OPT_INTEGER(0, "switch-max-files", &record.switch_output.num_files,
2384 "Limit number of switch output generated files"),
Wang Nan0aab2132016-06-16 08:02:41 +00002385 OPT_BOOLEAN(0, "dry-run", &dry_run,
2386 "Parse options then exit"),
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002387#ifdef HAVE_AIO_SUPPORT
Alexey Budankov93f20c02018-11-06 12:07:19 +03002388 OPT_CALLBACK_OPTARG(0, "aio", &record.opts,
2389 &nr_cblocks_default, "n", "Use <n> control blocks in asynchronous trace writing mode (default: 1, max: 4)",
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002390 record__aio_parse),
2391#endif
Alexey Budankovf4fe11b2019-01-22 20:52:03 +03002392 OPT_CALLBACK(0, "affinity", &record.opts, "node|cpu",
2393 "Set affinity mask of trace reading thread to NUMA node cpu mask or cpu of processed mmap buffer",
2394 record__parse_affinity),
Alexey Budankov504c1ad2019-03-18 20:44:42 +03002395#ifdef HAVE_ZSTD_SUPPORT
2396 OPT_CALLBACK_OPTARG('z', "compression-level", &record.opts, &comp_level_default,
2397 "n", "Compressed records using specified level (default: 1 - fastest compression, 22 - greatest compression)",
2398 record__parse_comp_level),
2399#endif
Jiwei Sun6d575812019-10-22 16:09:01 +08002400 OPT_CALLBACK(0, "max-size", &record.output_max_size,
2401 "size", "Limit the maximum size of the output file", parse_output_max_size),
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002402 OPT_END()
2403};
2404
Namhyung Kime5b2c202014-10-23 00:15:46 +09002405struct option *record_options = __record_options;
2406
Arnaldo Carvalho de Melob0ad8ea2017-03-27 11:47:20 -03002407int cmd_record(int argc, const char **argv)
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002408{
Adrian Hunteref149c22015-04-09 18:53:45 +03002409 int err;
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03002410 struct record *rec = &record;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002411 char errbuf[BUFSIZ];
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002412
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03002413 setlocale(LC_ALL, "");
2414
Wang Nan48e1cab2015-12-14 10:39:22 +00002415#ifndef HAVE_LIBBPF_SUPPORT
2416# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
2417 set_nobuild('\0', "clang-path", true);
2418 set_nobuild('\0', "clang-opt", true);
2419# undef set_nobuild
2420#endif
2421
He Kuang7efe0e02015-12-14 10:39:23 +00002422#ifndef HAVE_BPF_PROLOGUE
2423# if !defined (HAVE_DWARF_SUPPORT)
2424# define REASON "NO_DWARF=1"
2425# elif !defined (HAVE_LIBBPF_SUPPORT)
2426# define REASON "NO_LIBBPF=1"
2427# else
2428# define REASON "this architecture doesn't support BPF prologue"
2429# endif
2430# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
2431 set_nobuild('\0', "vmlinux", true);
2432# undef set_nobuild
2433# undef REASON
2434#endif
2435
Alexey Budankov9d2ed642019-01-22 20:47:43 +03002436 CPU_ZERO(&rec->affinity_mask);
2437 rec->opts.affinity = PERF_AFFINITY_SYS;
2438
Jiri Olsa0f98b112019-07-21 13:23:55 +02002439 rec->evlist = evlist__new();
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03002440 if (rec->evlist == NULL)
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -02002441 return -ENOMEM;
2442
Arnaldo Carvalho de Meloecc4c562017-01-24 13:44:10 -03002443 err = perf_config(perf_record_config, rec);
2444 if (err)
2445 return err;
Jiri Olsaeb853e82014-02-03 12:44:42 +01002446
Tom Zanussibca647a2010-11-10 08:11:30 -06002447 argc = parse_options(argc, argv, record_options, record_usage,
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02002448 PARSE_OPT_STOP_AT_NON_OPTION);
Namhyung Kim68ba3232017-02-17 17:17:42 +09002449 if (quiet)
2450 perf_quiet_option();
Jiri Olsa483635a2017-02-17 18:00:18 +01002451
2452 /* Make system wide (-a) the default target. */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002453 if (!argc && target__none(&rec->opts.target))
Jiri Olsa483635a2017-02-17 18:00:18 +01002454 rec->opts.target.system_wide = true;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002455
Namhyung Kimbea03402012-04-26 14:15:15 +09002456 if (nr_cgroups && !rec->opts.target.system_wide) {
Namhyung Kimc7118362015-10-25 00:49:27 +09002457 usage_with_options_msg(record_usage, record_options,
2458 "cgroup monitoring only available in system-wide mode");
2459
Stephane Eranian023695d2011-02-14 11:20:01 +02002460 }
Alexey Budankov504c1ad2019-03-18 20:44:42 +03002461
Adrian Huntereeb399b2019-10-04 11:31:21 +03002462 if (rec->opts.kcore)
2463 rec->data.is_dir = true;
2464
Alexey Budankov504c1ad2019-03-18 20:44:42 +03002465 if (rec->opts.comp_level != 0) {
2466 pr_debug("Compression enabled, disabling build id collection at the end of the session.\n");
2467 rec->no_buildid = true;
2468 }
2469
Adrian Hunterb757bb02015-07-21 12:44:04 +03002470 if (rec->opts.record_switch_events &&
2471 !perf_can_record_switch_events()) {
Namhyung Kimc7118362015-10-25 00:49:27 +09002472 ui__error("kernel does not support recording context switch events\n");
2473 parse_options_usage(record_usage, record_options, "switch-events", 0);
2474 return -EINVAL;
Adrian Hunterb757bb02015-07-21 12:44:04 +03002475 }
Stephane Eranian023695d2011-02-14 11:20:01 +02002476
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002477 if (switch_output_setup(rec)) {
2478 parse_options_usage(record_usage, record_options, "switch-output", 0);
2479 return -EINVAL;
2480 }
2481
Jiri Olsabfacbe32017-01-09 10:52:00 +01002482 if (rec->switch_output.time) {
2483 signal(SIGALRM, alarm_sig_handler);
2484 alarm(rec->switch_output.time);
2485 }
2486
Andi Kleen03724b22019-03-14 15:49:55 -07002487 if (rec->switch_output.num_files) {
2488 rec->switch_output.filenames = calloc(sizeof(char *),
2489 rec->switch_output.num_files);
2490 if (!rec->switch_output.filenames)
2491 return -EINVAL;
2492 }
2493
Adrian Hunter1b36c032016-09-23 17:38:39 +03002494 /*
2495 * Allow aliases to facilitate the lookup of symbols for address
2496 * filters. Refer to auxtrace_parse_filters().
2497 */
2498 symbol_conf.allow_aliases = true;
2499
2500 symbol__init(NULL);
2501
Adrian Hunter4b5ea3b2018-03-06 11:13:12 +02002502 err = record__auxtrace_init(rec);
Adrian Hunter1b36c032016-09-23 17:38:39 +03002503 if (err)
2504 goto out;
2505
Wang Nan0aab2132016-06-16 08:02:41 +00002506 if (dry_run)
Adrian Hunter5c01ad602016-09-23 17:38:37 +03002507 goto out;
Wang Nan0aab2132016-06-16 08:02:41 +00002508
Wang Nand7888572016-04-08 15:07:24 +00002509 err = bpf__setup_stdout(rec->evlist);
2510 if (err) {
2511 bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
2512 pr_err("ERROR: Setup BPF stdout failed: %s\n",
2513 errbuf);
Adrian Hunter5c01ad602016-09-23 17:38:37 +03002514 goto out;
Wang Nand7888572016-04-08 15:07:24 +00002515 }
2516
Adrian Hunteref149c22015-04-09 18:53:45 +03002517 err = -ENOMEM;
2518
Wang Nan0c1d46a2016-04-20 18:59:52 +00002519 if (rec->no_buildid_cache || rec->no_buildid) {
Stephane Eraniana1ac1d32010-06-17 11:39:01 +02002520 disable_buildid_cache();
Jiri Olsadc0c6122017-01-09 10:51:58 +01002521 } else if (rec->switch_output.enabled) {
Wang Nan0c1d46a2016-04-20 18:59:52 +00002522 /*
2523 * In 'perf record --switch-output', disable buildid
2524 * generation by default to reduce data file switching
2525 * overhead. Still generate buildid if they are required
2526 * explicitly using
2527 *
Jiri Olsa60437ac2017-01-03 09:19:56 +01002528 * perf record --switch-output --no-no-buildid \
Wang Nan0c1d46a2016-04-20 18:59:52 +00002529 * --no-no-buildid-cache
2530 *
2531 * Following code equals to:
2532 *
2533 * if ((rec->no_buildid || !rec->no_buildid_set) &&
2534 * (rec->no_buildid_cache || !rec->no_buildid_cache_set))
2535 * disable_buildid_cache();
2536 */
2537 bool disable = true;
2538
2539 if (rec->no_buildid_set && !rec->no_buildid)
2540 disable = false;
2541 if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
2542 disable = false;
2543 if (disable) {
2544 rec->no_buildid = true;
2545 rec->no_buildid_cache = true;
2546 disable_buildid_cache();
2547 }
2548 }
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02002549
Wang Nan4ea648a2016-07-14 08:34:47 +00002550 if (record.opts.overwrite)
2551 record.opts.tail_synthesize = true;
2552
Jiri Olsa6484d2f2019-07-21 13:24:28 +02002553 if (rec->evlist->core.nr_entries == 0 &&
Arnaldo Carvalho de Melo4b4cd502017-07-03 13:26:32 -03002554 __perf_evlist__add_default(rec->evlist, !record.opts.no_samples) < 0) {
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02002555 pr_err("Not enough memory for event selector list\n");
Adrian Hunter394c01e2016-09-23 17:38:36 +03002556 goto out;
Peter Zijlstrabbd36e52009-06-11 23:11:50 +02002557 }
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002558
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02002559 if (rec->opts.target.tid && !rec->opts.no_inherit_set)
2560 rec->opts.no_inherit = true;
2561
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002562 err = target__validate(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002563 if (err) {
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002564 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Jiri Olsac3dec272018-02-06 19:17:58 +01002565 ui__warning("%s\n", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002566 }
Namhyung Kim4bd0f2d2012-04-26 14:15:18 +09002567
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002568 err = target__parse_uid(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002569 if (err) {
2570 int saved_errno = errno;
2571
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002572 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Namhyung Kim3780f482012-05-29 13:22:57 +09002573 ui__error("%s", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002574
2575 err = -saved_errno;
Adrian Hunter394c01e2016-09-23 17:38:36 +03002576 goto out;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002577 }
Arnaldo Carvalho de Melo0d37aa32012-01-19 14:08:15 -02002578
Mengting Zhangca800062017-12-13 15:01:53 +08002579 /* Enable ignoring missing threads when -u/-p option is defined. */
2580 rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;
Jiri Olsa23dc4f12016-12-12 11:35:43 +01002581
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002582 err = -ENOMEM;
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03002583 if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
Arnaldo Carvalho de Melodd7927f2011-01-12 14:28:51 -02002584 usage_with_options(record_usage, record_options);
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02002585
Adrian Hunteref149c22015-04-09 18:53:45 +03002586 err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
2587 if (err)
Adrian Hunter394c01e2016-09-23 17:38:36 +03002588 goto out;
Adrian Hunteref149c22015-04-09 18:53:45 +03002589
Namhyung Kim61566812016-01-11 22:37:09 +09002590 /*
2591 * We take all buildids when the file contains
2592 * AUX area tracing data because we do not decode the
2593 * trace because it would take too long.
2594 */
2595 if (rec->opts.full_auxtrace)
2596 rec->buildid_all = true;
2597
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03002598 if (record_opts__config(&rec->opts)) {
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03002599 err = -EINVAL;
Adrian Hunter394c01e2016-09-23 17:38:36 +03002600 goto out;
Mike Galbraith7e4ff9e2009-10-12 07:56:03 +02002601 }
2602
Alexey Budankov93f20c02018-11-06 12:07:19 +03002603 if (rec->opts.nr_cblocks > nr_cblocks_max)
2604 rec->opts.nr_cblocks = nr_cblocks_max;
Alexey Budankov5d7f4112019-03-18 20:43:35 +03002605 pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks);
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002606
Alexey Budankov9d2ed642019-01-22 20:47:43 +03002607 pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
Alexey Budankov470530b2019-03-18 20:40:26 +03002608 pr_debug("mmap flush: %d\n", rec->opts.mmap_flush);
Alexey Budankov9d2ed642019-01-22 20:47:43 +03002609
Alexey Budankov51255a82019-03-18 20:42:19 +03002610 if (rec->opts.comp_level > comp_level_max)
2611 rec->opts.comp_level = comp_level_max;
2612 pr_debug("comp level: %d\n", rec->opts.comp_level);
2613
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002614 err = __cmd_record(&record, argc, argv);
Adrian Hunter394c01e2016-09-23 17:38:36 +03002615out:
Jiri Olsac12995a2019-07-21 13:23:56 +02002616 evlist__delete(rec->evlist);
Arnaldo Carvalho de Melod65a4582010-07-30 18:31:28 -03002617 symbol__exit();
Adrian Hunteref149c22015-04-09 18:53:45 +03002618 auxtrace_record__free(rec->itr);
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03002619 return err;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002620}
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002621
2622static void snapshot_sig_handler(int sig __maybe_unused)
2623{
Jiri Olsadc0c6122017-01-09 10:51:58 +01002624 struct record *rec = &record;
2625
Wang Nan5f9cf592016-04-20 18:59:49 +00002626 if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
2627 trigger_hit(&auxtrace_snapshot_trigger);
2628 auxtrace_record__snapshot_started = 1;
2629 if (auxtrace_record__snapshot_start(record.itr))
2630 trigger_error(&auxtrace_snapshot_trigger);
2631 }
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002632
Jiri Olsadc0c6122017-01-09 10:51:58 +01002633 if (switch_output_signal(rec))
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002634 trigger_hit(&switch_output_trigger);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002635}
Jiri Olsabfacbe32017-01-09 10:52:00 +01002636
2637static void alarm_sig_handler(int sig __maybe_unused)
2638{
2639 struct record *rec = &record;
2640
2641 if (switch_output_time(rec))
2642 trigger_hit(&switch_output_trigger);
2643}