blob: a0bd9104fae6e246d9e4a1a0348ac4804cf7c8a8 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Ingo Molnarabaff322009-06-02 22:59:57 +02002/*
Ingo Molnarbf9e1872009-06-02 23:37:05 +02003 * builtin-record.c
4 *
5 * Builtin record command: Record the profile of a workload
6 * (or a CPU, or a PID) into the perf.data output file - for
7 * later analysis via perf report.
Ingo Molnarabaff322009-06-02 22:59:57 +02008 */
Ingo Molnar16f762a2009-05-27 09:10:38 +02009#include "builtin.h"
Ingo Molnarbf9e1872009-06-02 23:37:05 +020010
11#include "perf.h"
12
Arnaldo Carvalho de Melo6122e4e2010-02-03 16:52:05 -020013#include "util/build-id.h"
Thomas Gleixner6eda5832009-05-01 18:29:57 +020014#include "util/util.h"
Josh Poimboeuf4b6ab942015-12-15 09:39:39 -060015#include <subcmd/parse-options.h>
Ingo Molnar8ad8db32009-05-26 11:10:09 +020016#include "util/parse-events.h"
Taeung Song41840d22016-06-23 17:55:17 +090017#include "util/config.h"
Thomas Gleixner6eda5832009-05-01 18:29:57 +020018
Arnaldo Carvalho de Melo8f651ea2014-10-09 16:12:24 -030019#include "util/callchain.h"
Arnaldo Carvalho de Melof14d5702014-10-17 12:17:40 -030020#include "util/cgroup.h"
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020021#include "util/header.h"
Frederic Weisbecker66e274f2009-08-12 11:07:25 +020022#include "util/event.h"
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -020023#include "util/evlist.h"
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -020024#include "util/evsel.h"
Frederic Weisbecker8f288272009-08-16 22:05:48 +020025#include "util/debug.h"
Arnaldo Carvalho de Melo94c744b2009-12-11 21:24:02 -020026#include "util/session.h"
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -020027#include "util/tool.h"
Arnaldo Carvalho de Melo8d063672009-11-04 18:50:43 -020028#include "util/symbol.h"
Paul Mackerrasa12b51c2010-03-10 20:36:09 +110029#include "util/cpumap.h"
Arnaldo Carvalho de Melofd782602011-01-18 15:15:24 -020030#include "util/thread_map.h"
Jiri Olsaf5fc14122013-10-15 16:27:32 +020031#include "util/data.h"
Stephane Eranianbcc84ec2015-08-31 18:41:12 +020032#include "util/perf_regs.h"
Adrian Hunteref149c22015-04-09 18:53:45 +030033#include "util/auxtrace.h"
Adrian Hunter46bc29b2016-03-08 10:38:44 +020034#include "util/tsc.h"
Andi Kleenf00898f2015-05-27 10:51:51 -070035#include "util/parse-branch-options.h"
Stephane Eranianbcc84ec2015-08-31 18:41:12 +020036#include "util/parse-regs-options.h"
Wang Nan71dc23262015-10-14 12:41:19 +000037#include "util/llvm-utils.h"
Wang Nan8690a2a2016-02-22 09:10:32 +000038#include "util/bpf-loader.h"
Wang Nan5f9cf592016-04-20 18:59:49 +000039#include "util/trigger.h"
Wang Nana0748652016-11-26 07:03:28 +000040#include "util/perf-hooks.h"
Alexey Budankovf13de662019-01-22 20:50:57 +030041#include "util/cpu-set-sched.h"
Arnaldo Carvalho de Meloc5e40272017-04-19 16:12:39 -030042#include "util/time-utils.h"
Arnaldo Carvalho de Melo58db1d62017-04-19 16:05:56 -030043#include "util/units.h"
Song Liu7b612e22019-01-17 08:15:19 -080044#include "util/bpf-event.h"
Wang Nand8871ea2016-02-26 09:32:06 +000045#include "asm/bug.h"
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020046
Arnaldo Carvalho de Meloa43783a2017-04-18 10:46:11 -030047#include <errno.h>
Arnaldo Carvalho de Melofd20e812017-04-17 15:23:08 -030048#include <inttypes.h>
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -030049#include <locale.h>
Arnaldo Carvalho de Melo42087352017-04-19 19:06:30 -030050#include <poll.h>
Peter Zijlstra97124d5e2009-06-02 15:52:24 +020051#include <unistd.h>
Peter Zijlstrade9ac072009-04-08 15:01:31 +020052#include <sched.h>
Arnaldo Carvalho de Melo9607ad32017-04-19 15:49:18 -030053#include <signal.h>
Arnaldo Carvalho de Meloa41794c2010-05-18 18:29:23 -030054#include <sys/mman.h>
Arnaldo Carvalho de Melo42087352017-04-19 19:06:30 -030055#include <sys/wait.h>
Arnaldo Carvalho de Melo0693e682016-08-08 15:05:46 -030056#include <linux/time64.h>
Bernhard Rosenkraenzer78da39f2012-10-08 09:43:26 +030057
/*
 * State for perf record's --switch-output option: when and how the
 * output file is rotated during a recording session.
 */
struct switch_output {
	bool		 enabled;	/* any switch-output mode is active */
	bool		 signal;	/* rotate on SIGUSR2 */
	unsigned long	 size;		/* rotate after this many bytes (0 = off) */
	unsigned long	 time;		/* rotate after this many seconds (0 = off) */
	const char	*str;		/* raw option string as given by the user */
	bool		 set;		/* option was present on the command line */
	char		**filenames;	/* ring of previously generated output names */
	int		 num_files;	/* capacity of filenames[] ring */
	int		 cur_file;	/* index of most recently used slot */
};
69
/*
 * Top-level state of a 'perf record' session, embedding the generic
 * perf_tool callbacks so container_of() can recover it in handlers.
 */
struct record {
	struct perf_tool	tool;		/* event callbacks; must stay first-class member for container_of */
	struct record_opts	opts;
	u64			bytes_written;	/* total payload written to perf.data so far */
	struct perf_data	data;		/* output file/dir descriptor */
	struct auxtrace_record	*itr;		/* AUX area (e.g. HW trace) recording state, NULL if unused */
	struct perf_evlist	*evlist;
	struct perf_session	*session;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_set;	/* distinguishes "unset" from "explicitly false" */
	bool			no_buildid_cache;
	bool			no_buildid_cache_set;
	bool			buildid_all;	/* mark all DSOs, not only those hit by samples */
	bool			timestamp_filename;
	bool			timestamp_boundary;
	struct switch_output	switch_output;
	unsigned long long	samples;
	cpu_set_t		affinity_mask;	/* scratch mask used for --affinity modes */
};
Ingo Molnara21ca2c2009-06-06 09:58:57 +020090
static volatile int auxtrace_record__snapshot_started;
/* Triggers coordinating snapshot / output-rotation requests with the main loop. */
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
static DEFINE_TRIGGER(switch_output_trigger);

/* Human-readable names indexed by enum PERF_AFFINITY_* for debug output. */
static const char *affinity_tags[PERF_AFFINITY_MAX] = {
	"SYS", "NODE", "CPU"
};
98
Jiri Olsadc0c6122017-01-09 10:51:58 +010099static bool switch_output_signal(struct record *rec)
100{
101 return rec->switch_output.signal &&
102 trigger_is_ready(&switch_output_trigger);
103}
104
105static bool switch_output_size(struct record *rec)
106{
107 return rec->switch_output.size &&
108 trigger_is_ready(&switch_output_trigger) &&
109 (rec->bytes_written >= rec->switch_output.size);
110}
111
Jiri Olsabfacbe32017-01-09 10:52:00 +0100112static bool switch_output_time(struct record *rec)
113{
114 return rec->switch_output.time &&
115 trigger_is_ready(&switch_output_trigger);
116}
117
/*
 * Synchronously append @size bytes at @bf to the perf.data file,
 * accounting the bytes and arming output rotation when the size
 * threshold is crossed.  Returns 0 on success, -1 on write failure.
 */
static int record__write(struct record *rec, struct perf_mmap *map __maybe_unused,
			 void *bf, size_t size)
{
	struct perf_data_file *file = &rec->session->data->file;

	if (perf_data_file__write(file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	rec->bytes_written += size;

	/* Rotate the output file once enough data was written (--switch-output=size). */
	if (switch_output_size(rec))
		trigger_hit(&switch_output_trigger);

	return 0;
}
135
/* Forward declarations: used by both the AIO and the plain write paths below. */
static int record__aio_enabled(struct record *rec);
static int record__comp_enabled(struct record *rec);
static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
			    void *src, size_t src_size);
140
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300141#ifdef HAVE_AIO_SUPPORT
142static int record__aio_write(struct aiocb *cblock, int trace_fd,
143 void *buf, size_t size, off_t off)
144{
145 int rc;
146
147 cblock->aio_fildes = trace_fd;
148 cblock->aio_buf = buf;
149 cblock->aio_nbytes = size;
150 cblock->aio_offset = off;
151 cblock->aio_sigevent.sigev_notify = SIGEV_NONE;
152
153 do {
154 rc = aio_write(cblock);
155 if (rc == 0) {
156 break;
157 } else if (errno != EAGAIN) {
158 cblock->aio_fildes = -1;
159 pr_err("failed to queue perf data, error: %m\n");
160 break;
161 }
162 } while (1);
163
164 return rc;
165}
166
/*
 * Poll one in-flight aio request.  Returns 0 if still in progress (or
 * restarted with the unwritten remainder), 1 once the request fully
 * completed and its cblock was released.
 */
static int record__aio_complete(struct perf_mmap *md, struct aiocb *cblock)
{
	void *rem_buf;
	off_t rem_off;
	size_t rem_size;
	int rc, aio_errno;
	ssize_t aio_ret, written;

	aio_errno = aio_error(cblock);
	if (aio_errno == EINPROGRESS)
		return 0;

	written = aio_ret = aio_return(cblock);
	if (aio_ret < 0) {
		if (aio_errno != EINTR)
			pr_err("failed to write perf data, error: %m\n");
		/* Treat a failed write as 0 bytes done so the whole chunk is retried. */
		written = 0;
	}

	rem_size = cblock->aio_nbytes - written;

	if (rem_size == 0) {
		/* Fully written: mark the cblock free for reuse. */
		cblock->aio_fildes = -1;
		/*
		 * md->refcount is incremented in record__aio_pushfn() for
		 * every aio write request started in record__aio_push() so
		 * decrement it because the request is now complete.
		 */
		perf_mmap__put(md);
		rc = 1;
	} else {
		/*
		 * aio write request may require restart with the
		 * remainder if the kernel didn't write the whole
		 * chunk at once.
		 */
		rem_off = cblock->aio_offset + written;
		rem_buf = (void *)(cblock->aio_buf + written);
		record__aio_write(cblock, cblock->aio_fildes,
				rem_buf, rem_size, rem_off);
		rc = 0;
	}

	return rc;
}
212
/*
 * Wait for aio requests on @md.  With sync_all == false, return the index
 * of the first free control block (blocking until one completes).  With
 * sync_all == true, block until every request finished, then return -1.
 */
static int record__aio_sync(struct perf_mmap *md, bool sync_all)
{
	struct aiocb **aiocb = md->aio.aiocb;
	struct aiocb *cblocks = md->aio.cblocks;
	struct timespec timeout = { 0, 1000 * 1000 * 1 }; /* 1ms */
	int i, do_suspend;

	do {
		do_suspend = 0;
		for (i = 0; i < md->aio.nr_cblocks; ++i) {
			/* Free slot: either never used (fildes == -1) or just completed. */
			if (cblocks[i].aio_fildes == -1 || record__aio_complete(md, &cblocks[i])) {
				if (sync_all)
					aiocb[i] = NULL;	/* drop from the suspend set */
				else
					return i;		/* caller can reuse this slot */
			} else {
				/*
				 * Started aio write is not complete yet
				 * so it has to be waited before the
				 * next allocation.
				 */
				aiocb[i] = &cblocks[i];
				do_suspend = 1;
			}
		}
		if (!do_suspend)	/* all requests drained (sync_all case) */
			return -1;

		/* Sleep up to 1ms for any outstanding request to make progress. */
		while (aio_suspend((const struct aiocb **)aiocb, md->aio.nr_cblocks, &timeout)) {
			if (!(errno == EAGAIN || errno == EINTR))
				pr_err("failed to sync perf data, error: %m\n");
		}
	} while (1);
}
247
/* Accumulator passed through perf_mmap__push() while staging data for aio. */
struct record_aio {
	struct record	*rec;	/* owning session (for compression settings) */
	void		*data;	/* destination aio buffer (map->aio.data[idx]) */
	size_t		size;	/* bytes staged into data so far */
};
253
/*
 * perf_mmap__push() callback: copy (or compress) one chunk from the kernel
 * ring buffer @buf into the staging aio buffer, taking a map reference on
 * the first chunk.  Returns the number of bytes staged.
 */
static int record__aio_pushfn(struct perf_mmap *map, void *to, void *buf, size_t size)
{
	struct record_aio *aio = to;

	/*
	 * map->base data pointed by buf is copied into free map->aio.data[] buffer
	 * to release space in the kernel buffer as fast as possible, calling
	 * perf_mmap__consume() from perf_mmap__push() function.
	 *
	 * That lets the kernel to proceed with storing more profiling data into
	 * the kernel buffer earlier than other per-cpu kernel buffers are handled.
	 *
	 * Copying can be done in two steps in case the chunk of profiling data
	 * crosses the upper bound of the kernel buffer. In this case we first move
	 * part of data from map->start till the upper bound and then the remainder
	 * from the beginning of the kernel buffer till the end of the data chunk.
	 */

	if (record__comp_enabled(aio->rec)) {
		size = zstd_compress(aio->rec->session, aio->data + aio->size,
				     perf_mmap__mmap_len(map) - aio->size,
				     buf, size);
	} else {
		memcpy(aio->data + aio->size, buf, size);
	}

	if (!aio->size) {
		/*
		 * Increment map->refcount to guard map->aio.data[] buffer
		 * from premature deallocation because map object can be
		 * released earlier than aio write request started on
		 * map->aio.data[] buffer is complete.
		 *
		 * perf_mmap__put() is done at record__aio_complete()
		 * after started aio request completion or at record__aio_push()
		 * if the request failed to start.
		 */
		perf_mmap__get(map);
	}

	aio->size += size;

	return size;
}
298
/*
 * Drain @map's kernel buffer into a free aio staging buffer and queue an
 * asynchronous write of it at file offset *@off, advancing *@off on
 * success.  Returns 0 if queued, >0 if there was no data, <0 on error.
 */
static int record__aio_push(struct record *rec, struct perf_mmap *map, off_t *off)
{
	int ret, idx;
	int trace_fd = rec->session->data->file.fd;
	struct record_aio aio = { .rec = rec, .size = 0 };

	/*
	 * Call record__aio_sync() to wait till map->aio.data[] buffer
	 * becomes available after previous aio write operation.
	 */
	idx = record__aio_sync(map, false);
	aio.data = map->aio.data[idx];
	ret = perf_mmap__push(map, &aio, record__aio_pushfn);
	if (ret != 0) /* ret > 0 - no data, ret < 0 - error */
		return ret;

	rec->samples++;
	ret = record__aio_write(&(map->aio.cblocks[idx]), trace_fd, aio.data, aio.size, *off);
	if (!ret) {
		*off += aio.size;
		rec->bytes_written += aio.size;
		if (switch_output_size(rec))
			trigger_hit(&switch_output_trigger);
	} else {
		/*
		 * Decrement map->refcount incremented in record__aio_pushfn()
		 * back if record__aio_write() operation failed to start, otherwise
		 * map->refcount is decremented in record__aio_complete() after
		 * aio write operation finishes successfully.
		 */
		perf_mmap__put(map);
	}

	return ret;
}
335
/* Current write position of the trace file (aio writes are positioned). */
static off_t record__aio_get_pos(int trace_fd)
{
	off_t pos = lseek(trace_fd, 0, SEEK_CUR);

	return pos;
}
340
/* Restore the trace file position after synchronous (non-aio) writes. */
static void record__aio_set_pos(int trace_fd, off_t pos)
{
	(void)lseek(trace_fd, pos, SEEK_SET);
}
345
/*
 * Block until every outstanding aio write on every mmap'ed buffer has
 * completed.  No-op when aio is disabled.
 */
static void record__aio_mmap_read_sync(struct record *rec)
{
	int i;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_mmap *maps = evlist->mmap;

	if (!record__aio_enabled(rec))
		return;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		struct perf_mmap *map = &maps[i];

		if (map->base)
			record__aio_sync(map, true);
	}
}
362
/* Default and upper bound for the number of in-flight aio control blocks. */
static int nr_cblocks_default = 1;
static int nr_cblocks_max = 4;
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300365
366static int record__aio_parse(const struct option *opt,
Alexey Budankov93f20c02018-11-06 12:07:19 +0300367 const char *str,
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300368 int unset)
369{
370 struct record_opts *opts = (struct record_opts *)opt->value;
371
Alexey Budankov93f20c02018-11-06 12:07:19 +0300372 if (unset) {
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300373 opts->nr_cblocks = 0;
Alexey Budankov93f20c02018-11-06 12:07:19 +0300374 } else {
375 if (str)
376 opts->nr_cblocks = strtol(str, NULL, 0);
377 if (!opts->nr_cblocks)
378 opts->nr_cblocks = nr_cblocks_default;
379 }
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300380
381 return 0;
382}
#else /* HAVE_AIO_SUPPORT */

/* Without aio support the feature is compiled out: zero cblocks, stub ops. */
static int nr_cblocks_max = 0;

/* Stub: never called because record__aio_enabled() is always false here. */
static int record__aio_push(struct record *rec __maybe_unused, struct perf_mmap *map __maybe_unused,
			    off_t *off __maybe_unused)
{
	return -1;
}

static off_t record__aio_get_pos(int trace_fd __maybe_unused)
{
	return -1;
}

static void record__aio_set_pos(int trace_fd __maybe_unused, off_t pos __maybe_unused)
{
}

static void record__aio_mmap_read_sync(struct record *rec __maybe_unused)
{
}

#endif
405
406static int record__aio_enabled(struct record *rec)
407{
408 return rec->opts.nr_cblocks > 0;
409}
410
#define MMAP_FLUSH_DEFAULT 1
/*
 * Option callback for --mmap-flush: parse a byte threshold (with optional
 * B/K/M/G suffix, else plain number) at which mmap buffers are flushed,
 * then clamp it to a quarter of the mmap buffer size.
 */
static int record__mmap_flush_parse(const struct option *opt,
				    const char *str,
				    int unset)
{
	int flush_max;
	struct record_opts *opts = (struct record_opts *)opt->value;
	static struct parse_tag tags[] = {
		{ .tag = 'B', .mult = 1 },
		{ .tag = 'K', .mult = 1 << 10 },
		{ .tag = 'M', .mult = 1 << 20 },
		{ .tag = 'G', .mult = 1 << 30 },
		{ .tag = 0 },
	};

	if (unset)
		return 0;

	if (str) {
		/* Try suffixed form first; parse_tag_value() signals failure with (unsigned)-1. */
		opts->mmap_flush = parse_tag_value(str, tags);
		if (opts->mmap_flush == (int)-1)
			opts->mmap_flush = strtol(str, NULL, 0);
	}

	if (!opts->mmap_flush)
		opts->mmap_flush = MMAP_FLUSH_DEFAULT;

	/* Cap the threshold at 1/4 of the ring-buffer size. */
	flush_max = perf_evlist__mmap_size(opts->mmap_pages);
	flush_max /= 4;
	if (opts->mmap_flush > flush_max)
		opts->mmap_flush = flush_max;

	return 0;
}
445
/* Highest accepted zstd compression level for --compression-level. */
static unsigned int comp_level_max = 22;

/* Non-zero when trace compression was requested (comp_level > 0). */
static int record__comp_enabled(struct record *rec)
{
	return rec->opts.comp_level > 0;
}
452
/*
 * perf_tool callback for events synthesized in userspace (mmaps, comms,
 * etc.): write them straight to the output file.
 */
static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, NULL, event, event->header.size);
}
461
/*
 * perf_mmap__push() callback for the synchronous write path: optionally
 * compress the chunk into map->data, then append it to the output file.
 */
static int record__pushfn(struct perf_mmap *map, void *to, void *bf, size_t size)
{
	struct record *rec = to;

	if (record__comp_enabled(rec)) {
		size = zstd_compress(rec->session, map->data, perf_mmap__mmap_len(map), bf, size);
		bf = map->data;	/* write the compressed copy instead of the ring data */
	}

	rec->samples++;
	return record__write(rec, map, bf, size);
}
474
/* Flags set from signal handlers and polled by the main recording loop. */
static volatile int done;		/* request to stop recording */
static volatile int signr = -1;		/* signal to re-raise at exit, -1 = none */
static volatile int child_finished;	/* workload child exited (SIGCHLD seen) */
Wang Nanc0bdc1c2016-04-13 08:21:06 +0000478
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300479static void sig_handler(int sig)
480{
481 if (sig == SIGCHLD)
482 child_finished = 1;
483 else
484 signr = sig;
485
486 done = 1;
487}
488
/* SIGSEGV handler: undo perf-hook state, then dump a stack trace. */
static void sigsegv_handler(int sig)
{
	perf_hooks__recover();
	sighandler_dump_stack(sig);
}
494
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300495static void record__sig_exit(void)
496{
497 if (signr == -1)
498 return;
499
500 signal(signr, SIG_DFL);
501 raise(signr);
502}
503
Adrian Huntere31f0d02015-04-30 17:37:27 +0300504#ifdef HAVE_AUXTRACE_SUPPORT
505
/*
 * Write one AUX area trace event: header, up to two data fragments (the
 * ring buffer chunk may wrap), and zero padding to an 8-byte boundary.
 * For seekable output, first records the event's file offset in the
 * auxtrace index.  Returns 0 on success, negative on error.
 */
static int record__process_auxtrace(struct perf_tool *tool,
				    struct perf_mmap *map,
				    union perf_event *event, void *data1,
				    size_t len1, void *data2, size_t len2)
{
	struct record *rec = container_of(tool, struct record, tool);
	struct perf_data *data = &rec->data;
	size_t padding;
	u8 pad[8] = {0};

	if (!perf_data__is_pipe(data) && !perf_data__is_dir(data)) {
		off_t file_offset;
		int fd = perf_data__fd(data);
		int err;

		file_offset = lseek(fd, 0, SEEK_CUR);
		if (file_offset == -1)
			return -1;
		err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
						     event, file_offset);
		if (err)
			return err;
	}

	/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
	padding = (len1 + len2) & 7;
	if (padding)
		padding = 8 - padding;

	record__write(rec, map, event, event->header.size);
	record__write(rec, map, data1, len1);
	if (len2)
		record__write(rec, map, data2, len2);
	record__write(rec, map, &pad, padding);

	return 0;
}
543
544static int record__auxtrace_mmap_read(struct record *rec,
Jiri Olsae035f4c2018-09-13 14:54:05 +0200545 struct perf_mmap *map)
Adrian Hunteref149c22015-04-09 18:53:45 +0300546{
547 int ret;
548
Jiri Olsae035f4c2018-09-13 14:54:05 +0200549 ret = auxtrace_mmap__read(map, rec->itr, &rec->tool,
Adrian Hunteref149c22015-04-09 18:53:45 +0300550 record__process_auxtrace);
551 if (ret < 0)
552 return ret;
553
554 if (ret)
555 rec->samples++;
556
557 return 0;
558}
559
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300560static int record__auxtrace_mmap_read_snapshot(struct record *rec,
Jiri Olsae035f4c2018-09-13 14:54:05 +0200561 struct perf_mmap *map)
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300562{
563 int ret;
564
Jiri Olsae035f4c2018-09-13 14:54:05 +0200565 ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool,
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300566 record__process_auxtrace,
567 rec->opts.auxtrace_snapshot_size);
568 if (ret < 0)
569 return ret;
570
571 if (ret)
572 rec->samples++;
573
574 return 0;
575}
576
577static int record__auxtrace_read_snapshot_all(struct record *rec)
578{
579 int i;
580 int rc = 0;
581
582 for (i = 0; i < rec->evlist->nr_mmaps; i++) {
Jiri Olsae035f4c2018-09-13 14:54:05 +0200583 struct perf_mmap *map = &rec->evlist->mmap[i];
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300584
Jiri Olsae035f4c2018-09-13 14:54:05 +0200585 if (!map->auxtrace_mmap.base)
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300586 continue;
587
Jiri Olsae035f4c2018-09-13 14:54:05 +0200588 if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300589 rc = -1;
590 goto out;
591 }
592 }
593out:
594 return rc;
595}
596
597static void record__read_auxtrace_snapshot(struct record *rec)
598{
599 pr_debug("Recording AUX area tracing snapshot\n");
600 if (record__auxtrace_read_snapshot_all(rec) < 0) {
Wang Nan5f9cf592016-04-20 18:59:49 +0000601 trigger_error(&auxtrace_snapshot_trigger);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300602 } else {
Wang Nan5f9cf592016-04-20 18:59:49 +0000603 if (auxtrace_record__snapshot_finish(rec->itr))
604 trigger_error(&auxtrace_snapshot_trigger);
605 else
606 trigger_ready(&auxtrace_snapshot_trigger);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300607 }
608}
609
/*
 * Initialize AUX area tracing for the session: create the recorder if
 * needed, then parse snapshot options and per-event filters.
 * Returns 0 on success or a negative error.
 */
static int record__auxtrace_init(struct record *rec)
{
	int err;

	if (!rec->itr) {
		rec->itr = auxtrace_record__init(rec->evlist, &err);
		if (err)
			return err;
	}

	err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
					      rec->opts.auxtrace_snapshot_opts);
	if (err)
		return err;

	return auxtrace_parse_filters(rec->evlist);
}
627
#else

/* AUX area tracing compiled out: all operations become successful no-ops. */

static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
			       struct perf_mmap *map __maybe_unused)
{
	return 0;
}

static inline
void record__read_auxtrace_snapshot(struct record *rec __maybe_unused)
{
}

static inline
int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
{
	return 0;
}

static int record__auxtrace_init(struct record *rec __maybe_unused)
{
	return 0;
}

#endif
654
/*
 * mmap the event ring buffers for @evlist using the session's mmap,
 * auxtrace, aio, affinity, flush and compression settings.  Prints a
 * diagnostic and returns a negative errno-style value on failure.
 */
static int record__mmap_evlist(struct record *rec,
			       struct perf_evlist *evlist)
{
	struct record_opts *opts = &rec->opts;
	char msg[512];

	/* Affinity modes other than SYS need the cpu -> node map set up first. */
	if (opts->affinity != PERF_AFFINITY_SYS)
		cpu__setup_cpunode_map();

	if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
				 opts->auxtrace_mmap_pages,
				 opts->auxtrace_snapshot_mode,
				 opts->nr_cblocks, opts->affinity,
				 opts->mmap_flush, opts->comp_level) < 0) {
		if (errno == EPERM) {
			/* Locked-memory limit exceeded: suggest the usual remedies. */
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u,%u)\n",
			       opts->mmap_pages, opts->auxtrace_mmap_pages);
			return -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno,
				str_error_r(errno, msg, sizeof(msg)));
			if (errno)
				return -errno;
			else
				return -EINVAL;
		}
	}
	return 0;
}
688
/* Convenience wrapper: mmap the session's own event list. */
static int record__mmap(struct record *rec)
{
	return record__mmap_evlist(rec, rec->evlist);
}
693
/*
 * Open all events in the session's evlist, applying fallbacks (e.g. when
 * a precise attribute is unsupported) and weak-group recovery, then apply
 * filters and mmap the buffers.  Returns 0 on success or a negative error.
 */
static int record__open(struct record *rec)
{
	char msg[BUFSIZ];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	int rc = 0;

	/*
	 * For initial_delay we need to add a dummy event so that we can track
	 * PERF_RECORD_MMAP while we wait for the initial delay to enable the
	 * real events, the ones asked by the user.
	 */
	if (opts->initial_delay) {
		if (perf_evlist__add_dummy(evlist))
			return -ENOMEM;

		/* The dummy tracks from the start; real events start on exec. */
		pos = perf_evlist__first(evlist);
		pos->tracking = 0;
		pos = perf_evlist__last(evlist);
		pos->tracking = 1;
		pos->attr.enable_on_exec = 1;
	}

	perf_evlist__config(evlist, opts, &callchain_param);

	evlist__for_each_entry(evlist, pos) {
try_again:
		if (perf_evsel__open(pos, pos->cpus, pos->threads) < 0) {
			/* First try downgrading the event config (e.g. drop precision). */
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose > 0)
					ui__warning("%s\n", msg);
				goto try_again;
			}
			/* Weak group member failed: break up the group and retry. */
			if ((errno == EINVAL || errno == EBADF) &&
			    pos->leader != pos &&
			    pos->weak_group) {
				pos = perf_evlist__reset_weak_group(evlist, pos);
				goto try_again;
			}
			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}

		pos->supported = true;
	}

	if (perf_evlist__apply_filters(evlist, &pos)) {
		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
			pos->filter, perf_evsel__name(pos), errno,
			str_error_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	rc = record__mmap(rec);
	if (rc)
		goto out;

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}
762
/*
 * Sample callback used while post-processing build-ids: track the
 * first/last sample timestamps and, unless --buildid-all was given,
 * mark the DSO hit by this sample.
 */
static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	struct record *rec = container_of(tool, struct record, tool);

	if (rec->evlist->first_sample_time == 0)
		rec->evlist->first_sample_time = sample->time;

	rec->evlist->last_sample_time = sample->time;

	/* With --buildid-all every DSO is marked anyway; skip per-sample work. */
	if (rec->buildid_all)
		return 0;

	rec->samples++;
	return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
}
782
/*
 * Re-read the freshly written perf.data to collect build-ids of the DSOs
 * that were hit (or all of them with --buildid-all).  Returns 0 when the
 * file is empty, otherwise the result of processing the session.
 */
static int process_buildids(struct record *rec)
{
	struct perf_session *session = rec->session;

	if (perf_data__size(&rec->data) == 0)
		return 0;

	/*
	 * During this process, it'll load kernel map and replace the
	 * dso->long_name to a real pathname it found. In this case
	 * we prefer the vmlinux path like
	 * /lib/modules/3.16.4/build/vmlinux
	 *
	 * rather than build-id path (in debug directory).
	 * $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
	 */
	symbol_conf.ignore_vmlinux_buildid = true;

	/*
	 * If --buildid-all is given, it marks all DSO regardless of hits,
	 * so no need to process samples. But if timestamp_boundary is enabled,
	 * it still needs to walk on all samples to get the timestamps of
	 * first/last samples.
	 */
	if (rec->buildid_all && !rec->timestamp_boundary)
		rec->tool.sample = NULL;

	return perf_session__process_events(session);
}
812
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -0200813static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800814{
815 int err;
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -0200816 struct perf_tool *tool = data;
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800817 /*
818 *As for guest kernel when processing subcommand record&report,
819 *we arrange module mmap prior to guest kernel mmap and trigger
820 *a preload dso because default guest module symbols are loaded
821 *from guest kallsyms instead of /lib/modules/XXX/XXX. This
822 *method is used to avoid symbol missing when the first addr is
823 *in module instead of in guest kernel.
824 */
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -0200825 err = perf_event__synthesize_modules(tool, process_synthesized_event,
Arnaldo Carvalho de Melo743eb862011-11-28 07:56:39 -0200826 machine);
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800827 if (err < 0)
828 pr_err("Couldn't record guest kernel [%d]'s reference"
Arnaldo Carvalho de Melo23346f22010-04-27 21:17:50 -0300829 " relocation symbol.\n", machine->pid);
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800830
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800831 /*
832 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
833 * have no _text sometimes.
834 */
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -0200835 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
Adrian Hunter0ae617b2014-01-29 16:14:40 +0200836 machine);
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800837 if (err < 0)
838 pr_err("Couldn't record guest kernel [%d]'s reference"
Arnaldo Carvalho de Melo23346f22010-04-27 21:17:50 -0300839 " relocation symbol.\n", machine->pid);
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800840}
841
/*
 * Synthetic PERF_RECORD_FINISHED_ROUND marker, written after a full pass
 * over the mmap buffers so the report side knows it may safely flush and
 * reorder the events buffered so far.
 */
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};
846
Alexey Budankovf13de662019-01-22 20:50:57 +0300847static void record__adjust_affinity(struct record *rec, struct perf_mmap *map)
848{
849 if (rec->opts.affinity != PERF_AFFINITY_SYS &&
850 !CPU_EQUAL(&rec->affinity_mask, &map->affinity_mask)) {
851 CPU_ZERO(&rec->affinity_mask);
852 CPU_OR(&rec->affinity_mask, &rec->affinity_mask, &map->affinity_mask);
853 sched_setaffinity(0, sizeof(rec->affinity_mask), &rec->affinity_mask);
854 }
855}
856
Alexey Budankov5d7f4112019-03-18 20:43:35 +0300857static size_t process_comp_header(void *record, size_t increment)
858{
859 struct compressed_event *event = record;
860 size_t size = sizeof(*event);
861
862 if (increment) {
863 event->header.size += increment;
864 return increment;
865 }
866
867 event->header.type = PERF_RECORD_COMPRESSED;
868 event->header.size = size;
869
870 return size;
871}
872
873static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
874 void *src, size_t src_size)
875{
876 size_t compressed;
877 size_t max_record_size = PERF_SAMPLE_MAX_SIZE - sizeof(struct compressed_event) - 1;
878
879 compressed = zstd_compress_stream_to_records(&session->zstd_data, dst, dst_size, src, src_size,
880 max_record_size, process_comp_header);
881
882 session->bytes_transferred += src_size;
883 session->bytes_compressed += compressed;
884
885 return compressed;
886}
887
/*
 * Drain one set of mmap ring buffers (the regular set or, with
 * overwrite == true, the overwritable set) into the output file, via
 * either synchronous pushes or AIO.  With synch == true every buffer is
 * flushed completely (map->flush temporarily forced to 1).  Writes a
 * PERF_RECORD_FINISHED_ROUND marker if anything was written.
 * Returns 0 on success, -1 on failure.
 */
static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
				    bool overwrite, bool synch)
{
	u64 bytes_written = rec->bytes_written;	/* to detect whether this pass wrote anything */
	int i;
	int rc = 0;
	struct perf_mmap *maps;
	int trace_fd = rec->data.file.fd;
	off_t off = 0;	/* file offset tracked manually in AIO mode */

	if (!evlist)
		return 0;

	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
	if (!maps)
		return 0;

	/* Overwritable buffers are only read when data collection is paused. */
	if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
		return 0;

	if (record__aio_enabled(rec))
		off = record__aio_get_pos(trace_fd);

	for (i = 0; i < evlist->nr_mmaps; i++) {
		u64 flush = 0;
		struct perf_mmap *map = &maps[i];

		if (map->base) {
			record__adjust_affinity(rec, map);
			if (synch) {
				/* Temporarily force a full flush; restored below. */
				flush = map->flush;
				map->flush = 1;
			}
			if (!record__aio_enabled(rec)) {
				if (perf_mmap__push(map, rec, record__pushfn) < 0) {
					if (synch)
						map->flush = flush;
					rc = -1;
					goto out;
				}
			} else {
				if (record__aio_push(rec, map, &off) < 0) {
					/* Re-sync the kernel file position with our offset. */
					record__aio_set_pos(trace_fd, off);
					if (synch)
						map->flush = flush;
					rc = -1;
					goto out;
				}
			}
			if (synch)
				map->flush = flush;
		}

		/* AUX area data is read separately, except in snapshot mode. */
		if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
		    record__auxtrace_mmap_read(rec, map) != 0) {
			rc = -1;
			goto out;
		}
	}

	if (record__aio_enabled(rec))
		record__aio_set_pos(trace_fd, off);

	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, NULL, &finished_round_event, sizeof(finished_round_event));

	if (overwrite)
		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
out:
	return rc;
}
963
Alexey Budankov470530b2019-03-18 20:40:26 +0300964static int record__mmap_read_all(struct record *rec, bool synch)
Wang Nancb216862016-06-27 10:24:04 +0000965{
966 int err;
967
Alexey Budankov470530b2019-03-18 20:40:26 +0300968 err = record__mmap_read_evlist(rec, rec->evlist, false, synch);
Wang Nancb216862016-06-27 10:24:04 +0000969 if (err)
970 return err;
971
Alexey Budankov470530b2019-03-18 20:40:26 +0300972 return record__mmap_read_evlist(rec, rec->evlist, true, synch);
Wang Nancb216862016-06-27 10:24:04 +0000973}
974
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -0300975static void record__init_features(struct record *rec)
David Ahern57706ab2013-11-06 11:41:34 -0700976{
David Ahern57706ab2013-11-06 11:41:34 -0700977 struct perf_session *session = rec->session;
978 int feat;
979
980 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
981 perf_header__set_feat(&session->header, feat);
982
983 if (rec->no_buildid)
984 perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
985
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -0300986 if (!have_tracepoints(&rec->evlist->entries))
David Ahern57706ab2013-11-06 11:41:34 -0700987 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
988
989 if (!rec->opts.branch_stack)
990 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
Adrian Hunteref149c22015-04-09 18:53:45 +0300991
992 if (!rec->opts.full_auxtrace)
993 perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
Jiri Olsaffa517a2015-10-25 15:51:43 +0100994
Alexey Budankovcf790512018-10-09 17:36:24 +0300995 if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
996 perf_header__clear_feat(&session->header, HEADER_CLOCKID);
997
Jiri Olsa258031c2019-03-08 14:47:39 +0100998 perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
Alexey Budankov42e1fd82019-03-18 20:41:33 +0300999 if (!record__comp_enabled(rec))
1000 perf_header__clear_feat(&session->header, HEADER_COMPRESSED);
Jiri Olsa258031c2019-03-08 14:47:39 +01001001
Jiri Olsaffa517a2015-10-25 15:51:43 +01001002 perf_header__clear_feat(&session->header, HEADER_STAT);
David Ahern57706ab2013-11-06 11:41:34 -07001003}
1004
Wang Nane1ab48b2016-02-26 09:32:10 +00001005static void
1006record__finish_output(struct record *rec)
1007{
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001008 struct perf_data *data = &rec->data;
1009 int fd = perf_data__fd(data);
Wang Nane1ab48b2016-02-26 09:32:10 +00001010
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001011 if (data->is_pipe)
Wang Nane1ab48b2016-02-26 09:32:10 +00001012 return;
1013
1014 rec->session->header.data_size += rec->bytes_written;
Jiri Olsa45112e82019-02-21 10:41:29 +01001015 data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR);
Wang Nane1ab48b2016-02-26 09:32:10 +00001016
1017 if (!rec->no_buildid) {
1018 process_buildids(rec);
1019
1020 if (rec->buildid_all)
1021 dsos__hit_all(rec->session);
1022 }
1023 perf_session__write_header(rec->session, rec->evlist, fd, true);
1024
1025 return;
1026}
1027
Wang Nan4ea648a2016-07-14 08:34:47 +00001028static int record__synthesize_workload(struct record *rec, bool tail)
Wang Nanbe7b0c92016-04-20 18:59:54 +00001029{
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -03001030 int err;
1031 struct thread_map *thread_map;
Wang Nanbe7b0c92016-04-20 18:59:54 +00001032
Wang Nan4ea648a2016-07-14 08:34:47 +00001033 if (rec->opts.tail_synthesize != tail)
1034 return 0;
1035
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -03001036 thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
1037 if (thread_map == NULL)
1038 return -1;
1039
1040 err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
Wang Nanbe7b0c92016-04-20 18:59:54 +00001041 process_synthesized_event,
1042 &rec->session->machines.host,
Mark Drayton3fcb10e2018-12-04 12:34:20 -08001043 rec->opts.sample_address);
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -03001044 thread_map__put(thread_map);
1045 return err;
Wang Nanbe7b0c92016-04-20 18:59:54 +00001046}
1047
Wang Nan4ea648a2016-07-14 08:34:47 +00001048static int record__synthesize(struct record *rec, bool tail);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001049
/*
 * Rotate the output file (--switch-output): flush and finalize the
 * current perf.data, open the next timestamped file, and re-synthesize
 * the tracking events the new file needs to stand alone.  With a bounded
 * number of files (-F n) the oldest one is removed.  Returns the new
 * output fd, or a negative error code.
 */
static int
record__switch_output(struct record *rec, bool at_exit)
{
	struct perf_data *data = &rec->data;
	int fd, err;
	char *new_filename;

	/* Same Size: "2015122520103046"*/
	char timestamp[] = "InvalidTimestamp";

	/* Wait for in-flight AIO writes before touching the file. */
	record__aio_mmap_read_sync(rec);

	record__synthesize(rec, true);
	if (target__none(&rec->opts.target))
		record__synthesize_workload(rec, true);

	rec->samples = 0;
	record__finish_output(rec);
	err = fetch_current_timestamp(timestamp, sizeof(timestamp));
	if (err) {
		pr_err("Failed to get current timestamp\n");
		return -EINVAL;
	}

	/* Renames the finished file and opens a fresh output. */
	fd = perf_data__switch(data, timestamp,
				    rec->session->header.data_offset,
				    at_exit, &new_filename);
	if (fd >= 0 && !at_exit) {
		/* Restart accounting for the new file. */
		rec->bytes_written = 0;
		rec->session->header.data_size = 0;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
			data->path, timestamp);

	if (rec->switch_output.num_files) {
		/* Ring of at most num_files names: drop the one being reused. */
		int n = rec->switch_output.cur_file + 1;

		if (n >= rec->switch_output.num_files)
			n = 0;
		rec->switch_output.cur_file = n;
		if (rec->switch_output.filenames[n]) {
			remove(rec->switch_output.filenames[n]);
			free(rec->switch_output.filenames[n]);
		}
		rec->switch_output.filenames[n] = new_filename;	/* takes ownership */
	} else {
		free(new_filename);
	}

	/* Output tracking events */
	if (!at_exit) {
		record__synthesize(rec, false);

		/*
		 * In 'perf record --switch-output' without -a,
		 * record__synthesize() in record__switch_output() won't
		 * generate tracking events because there's no thread_map
		 * in evlist. Which causes newly created perf.data doesn't
		 * contain map and comm information.
		 * Create a fake thread_map and directly call
		 * perf_event__synthesize_thread_map() for those events.
		 */
		if (target__none(&rec->opts.target))
			record__synthesize_workload(rec, false);
	}
	return fd;
}
1119
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001120static volatile int workload_exec_errno;
1121
1122/*
1123 * perf_evlist__prepare_workload will send a SIGUSR1
1124 * if the fork fails, since we asked by setting its
1125 * want_signal to true.
1126 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	/* The fork/exec errno arrives in sival_int via sigqueue(). */
	workload_exec_errno = info->si_value.sival_int;
	/* Tell the main record loop to stop and reap the child. */
	done = 1;
	child_finished = 1;
}
1135
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001136static void snapshot_sig_handler(int sig);
Jiri Olsabfacbe32017-01-09 10:52:00 +01001137static void alarm_sig_handler(int sig);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001138
/*
 * Weak default for architectures that cannot synthesize a
 * PERF_RECORD_TIME_CONV event; arch-specific code overrides this.
 * Returns 0 so recording proceeds without the event.
 */
int __weak
perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
			    struct perf_tool *tool __maybe_unused,
			    perf_event__handler_t process __maybe_unused,
			    struct machine *machine __maybe_unused)
{
	return 0;
}
1147
Wang Nanee667f92016-06-27 10:24:05 +00001148static const struct perf_event_mmap_page *
1149perf_evlist__pick_pc(struct perf_evlist *evlist)
1150{
Wang Nanb2cb6152016-07-14 08:34:39 +00001151 if (evlist) {
1152 if (evlist->mmap && evlist->mmap[0].base)
1153 return evlist->mmap[0].base;
Wang Nan0b72d692017-12-04 16:51:07 +00001154 if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].base)
1155 return evlist->overwrite_mmap[0].base;
Wang Nanb2cb6152016-07-14 08:34:39 +00001156 }
Wang Nanee667f92016-06-27 10:24:05 +00001157 return NULL;
1158}
1159
Wang Nanc45628b2016-05-24 02:28:59 +00001160static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
1161{
Wang Nanee667f92016-06-27 10:24:05 +00001162 const struct perf_event_mmap_page *pc;
1163
1164 pc = perf_evlist__pick_pc(rec->evlist);
1165 if (pc)
1166 return pc;
Wang Nanc45628b2016-05-24 02:28:59 +00001167 return NULL;
1168}
1169
Wang Nan4ea648a2016-07-14 08:34:47 +00001170static int record__synthesize(struct record *rec, bool tail)
Wang Nanc45c86e2016-02-26 09:32:07 +00001171{
1172 struct perf_session *session = rec->session;
1173 struct machine *machine = &session->machines.host;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001174 struct perf_data *data = &rec->data;
Wang Nanc45c86e2016-02-26 09:32:07 +00001175 struct record_opts *opts = &rec->opts;
1176 struct perf_tool *tool = &rec->tool;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001177 int fd = perf_data__fd(data);
Wang Nanc45c86e2016-02-26 09:32:07 +00001178 int err = 0;
1179
Wang Nan4ea648a2016-07-14 08:34:47 +00001180 if (rec->opts.tail_synthesize != tail)
1181 return 0;
1182
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001183 if (data->is_pipe) {
Jiri Olsaa2015512018-03-14 10:22:04 +01001184 /*
1185 * We need to synthesize events first, because some
1186 * features works on top of them (on report side).
1187 */
Jiri Olsa318ec182018-08-30 08:32:15 +02001188 err = perf_event__synthesize_attrs(tool, rec->evlist,
Wang Nanc45c86e2016-02-26 09:32:07 +00001189 process_synthesized_event);
1190 if (err < 0) {
1191 pr_err("Couldn't synthesize attrs.\n");
1192 goto out;
1193 }
1194
Jiri Olsaa2015512018-03-14 10:22:04 +01001195 err = perf_event__synthesize_features(tool, session, rec->evlist,
1196 process_synthesized_event);
1197 if (err < 0) {
1198 pr_err("Couldn't synthesize features.\n");
1199 return err;
1200 }
1201
Wang Nanc45c86e2016-02-26 09:32:07 +00001202 if (have_tracepoints(&rec->evlist->entries)) {
1203 /*
1204 * FIXME err <= 0 here actually means that
1205 * there were no tracepoints so its not really
1206 * an error, just that we don't need to
1207 * synthesize anything. We really have to
1208 * return this more properly and also
1209 * propagate errors that now are calling die()
1210 */
1211 err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
1212 process_synthesized_event);
1213 if (err <= 0) {
1214 pr_err("Couldn't record tracing data.\n");
1215 goto out;
1216 }
1217 rec->bytes_written += err;
1218 }
1219 }
1220
Wang Nanc45628b2016-05-24 02:28:59 +00001221 err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
Adrian Hunter46bc29b2016-03-08 10:38:44 +02001222 process_synthesized_event, machine);
1223 if (err)
1224 goto out;
1225
Wang Nanc45c86e2016-02-26 09:32:07 +00001226 if (rec->opts.full_auxtrace) {
1227 err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
1228 session, process_synthesized_event);
1229 if (err)
1230 goto out;
1231 }
1232
Arnaldo Carvalho de Melo6c443952017-11-14 11:03:19 -03001233 if (!perf_evlist__exclude_kernel(rec->evlist)) {
1234 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
1235 machine);
1236 WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
1237 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
1238 "Check /proc/kallsyms permission or run as root.\n");
Wang Nanc45c86e2016-02-26 09:32:07 +00001239
Arnaldo Carvalho de Melo6c443952017-11-14 11:03:19 -03001240 err = perf_event__synthesize_modules(tool, process_synthesized_event,
1241 machine);
1242 WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
1243 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
1244 "Check /proc/modules permission or run as root.\n");
1245 }
Wang Nanc45c86e2016-02-26 09:32:07 +00001246
1247 if (perf_guest) {
1248 machines__process_guests(&session->machines,
1249 perf_event__synthesize_guest_os, tool);
1250 }
1251
Andi Kleenbfd8f722017-11-17 13:42:58 -08001252 err = perf_event__synthesize_extra_attr(&rec->tool,
1253 rec->evlist,
1254 process_synthesized_event,
1255 data->is_pipe);
1256 if (err)
1257 goto out;
1258
Andi Kleen373565d2017-11-17 13:42:59 -08001259 err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->threads,
1260 process_synthesized_event,
1261 NULL);
1262 if (err < 0) {
1263 pr_err("Couldn't synthesize thread map.\n");
1264 return err;
1265 }
1266
1267 err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->cpus,
1268 process_synthesized_event, NULL);
1269 if (err < 0) {
1270 pr_err("Couldn't synthesize cpu map.\n");
1271 return err;
1272 }
1273
Song Liue5416952019-03-11 22:30:41 -07001274 err = perf_event__synthesize_bpf_events(session, process_synthesized_event,
Song Liu7b612e22019-01-17 08:15:19 -08001275 machine, opts);
1276 if (err < 0)
1277 pr_warning("Couldn't synthesize bpf events.\n");
1278
Wang Nanc45c86e2016-02-26 09:32:07 +00001279 err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
1280 process_synthesized_event, opts->sample_address,
Mark Drayton3fcb10e2018-12-04 12:34:20 -08001281 1);
Wang Nanc45c86e2016-02-26 09:32:07 +00001282out:
1283 return err;
1284}
1285
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001286static int __cmd_record(struct record *rec, int argc, const char **argv)
Peter Zijlstra16c8a102009-05-05 17:50:27 +02001287{
David Ahern57706ab2013-11-06 11:41:34 -07001288 int err;
Namhyung Kim45604712014-05-12 09:47:24 +09001289 int status = 0;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001290 unsigned long waking = 0;
Zhang, Yanmin46be6042010-03-18 11:36:04 -03001291 const bool forks = argc > 0;
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02001292 struct perf_tool *tool = &rec->tool;
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03001293 struct record_opts *opts = &rec->opts;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001294 struct perf_data *data = &rec->data;
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001295 struct perf_session *session;
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001296 bool disabled = false, draining = false;
Song Liu657ee552019-03-11 22:30:50 -07001297 struct perf_evlist *sb_evlist = NULL;
Namhyung Kim42aa2762015-01-29 17:06:48 +09001298 int fd;
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001299 float ratio = 0;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001300
Namhyung Kim45604712014-05-12 09:47:24 +09001301 atexit(record__sig_exit);
Peter Zijlstraf5970552009-06-18 23:22:55 +02001302 signal(SIGCHLD, sig_handler);
1303 signal(SIGINT, sig_handler);
David Ahern804f7ac2013-05-06 12:24:23 -06001304 signal(SIGTERM, sig_handler);
Wang Nana0748652016-11-26 07:03:28 +00001305 signal(SIGSEGV, sigsegv_handler);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00001306
Hari Bathinif3b36142017-03-08 02:11:43 +05301307 if (rec->opts.record_namespaces)
1308 tool->namespace_events = true;
1309
Jiri Olsadc0c6122017-01-09 10:51:58 +01001310 if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001311 signal(SIGUSR2, snapshot_sig_handler);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001312 if (rec->opts.auxtrace_snapshot_mode)
1313 trigger_on(&auxtrace_snapshot_trigger);
Jiri Olsadc0c6122017-01-09 10:51:58 +01001314 if (rec->switch_output.enabled)
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001315 trigger_on(&switch_output_trigger);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00001316 } else {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001317 signal(SIGUSR2, SIG_IGN);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00001318 }
Peter Zijlstraf5970552009-06-18 23:22:55 +02001319
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001320 session = perf_session__new(data, false, tool);
Arnaldo Carvalho de Melo94c744b2009-12-11 21:24:02 -02001321 if (session == NULL) {
Adrien BAKffa91882014-04-18 11:00:43 +09001322 pr_err("Perf session creation failed.\n");
Arnaldo Carvalho de Meloa9a70bb2009-11-17 01:18:11 -02001323 return -1;
1324 }
1325
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001326 fd = perf_data__fd(data);
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001327 rec->session = session;
1328
Alexey Budankov5d7f4112019-03-18 20:43:35 +03001329 if (zstd_init(&session->zstd_data, rec->opts.comp_level) < 0) {
1330 pr_err("Compression initialization failed.\n");
1331 return -1;
1332 }
1333
1334 session->header.env.comp_type = PERF_COMP_ZSTD;
1335 session->header.env.comp_level = rec->opts.comp_level;
1336
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001337 record__init_features(rec);
Stephane Eranian330aa672012-03-08 23:47:46 +01001338
Alexey Budankovcf790512018-10-09 17:36:24 +03001339 if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
1340 session->header.env.clockid_res_ns = rec->opts.clockid_res_ns;
1341
Arnaldo Carvalho de Melod4db3f12009-12-27 21:36:57 -02001342 if (forks) {
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001343 err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001344 argv, data->is_pipe,
Arnaldo Carvalho de Melo735f7e02014-01-03 14:56:49 -03001345 workload_exec_failed_signal);
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001346 if (err < 0) {
1347 pr_err("Couldn't run the workload!\n");
Namhyung Kim45604712014-05-12 09:47:24 +09001348 status = err;
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001349 goto out_delete_session;
Jens Axboe0a5ac842009-08-12 11:18:01 +02001350 }
Peter Zijlstra856e9662009-12-16 17:55:55 +01001351 }
1352
Jiri Olsaad46e48c2018-03-02 17:13:54 +01001353 /*
1354 * If we have just single event and are sending data
1355 * through pipe, we need to force the ids allocation,
1356 * because we synthesize event name through the pipe
1357 * and need the id for that.
1358 */
1359 if (data->is_pipe && rec->evlist->nr_entries == 1)
1360 rec->opts.sample_id = true;
1361
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001362 if (record__open(rec) != 0) {
David Ahern8d3eca22012-08-26 12:24:47 -06001363 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001364 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001365 }
Alexey Budankov42e1fd82019-03-18 20:41:33 +03001366 session->header.env.comp_mmap_len = session->evlist->mmap_len;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001367
Wang Nan8690a2a2016-02-22 09:10:32 +00001368 err = bpf__apply_obj_config();
1369 if (err) {
1370 char errbuf[BUFSIZ];
1371
1372 bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
1373 pr_err("ERROR: Apply config to BPF failed: %s\n",
1374 errbuf);
1375 goto out_child;
1376 }
1377
Adrian Huntercca84822015-08-19 17:29:21 +03001378 /*
1379 * Normally perf_session__new would do this, but it doesn't have the
1380 * evlist.
1381 */
1382 if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
1383 pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
1384 rec->tool.ordered_events = false;
1385 }
1386
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001387 if (!rec->evlist->nr_groups)
Namhyung Kima8bb5592013-01-22 18:09:31 +09001388 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
1389
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001390 if (data->is_pipe) {
Namhyung Kim42aa2762015-01-29 17:06:48 +09001391 err = perf_header__write_pipe(fd);
Tom Zanussi529870e2010-04-01 23:59:16 -05001392 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001393 goto out_child;
Jiri Olsa563aecb2013-06-05 13:35:06 +02001394 } else {
Namhyung Kim42aa2762015-01-29 17:06:48 +09001395 err = perf_session__write_header(session, rec->evlist, fd, false);
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02001396 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001397 goto out_child;
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02001398 }
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02001399
David Ahernd3665492012-02-06 15:27:52 -07001400 if (!rec->no_buildid
Robert Richtere20960c2011-12-07 10:02:55 +01001401 && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
David Ahernd3665492012-02-06 15:27:52 -07001402 pr_err("Couldn't generate buildids. "
Robert Richtere20960c2011-12-07 10:02:55 +01001403 "Use --no-buildid to profile anyway.\n");
David Ahern8d3eca22012-08-26 12:24:47 -06001404 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001405 goto out_child;
Robert Richtere20960c2011-12-07 10:02:55 +01001406 }
1407
Song Liud56354d2019-03-11 22:30:51 -07001408 if (!opts->no_bpf_event)
1409 bpf_event__add_sb_event(&sb_evlist, &session->header.env);
1410
Song Liu657ee552019-03-11 22:30:50 -07001411 if (perf_evlist__start_sb_thread(sb_evlist, &rec->opts.target)) {
1412 pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
1413 opts->no_bpf_event = true;
1414 }
1415
Wang Nan4ea648a2016-07-14 08:34:47 +00001416 err = record__synthesize(rec, false);
Wang Nanc45c86e2016-02-26 09:32:07 +00001417 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001418 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001419
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001420 if (rec->realtime_prio) {
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001421 struct sched_param param;
1422
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001423 param.sched_priority = rec->realtime_prio;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001424 if (sched_setscheduler(0, SCHED_FIFO, &param)) {
Arnaldo Carvalho de Melo6beba7a2009-10-21 17:34:06 -02001425 pr_err("Could not set realtime priority.\n");
David Ahern8d3eca22012-08-26 12:24:47 -06001426 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001427 goto out_child;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001428 }
1429 }
1430
Jiri Olsa774cb492012-11-12 18:34:01 +01001431 /*
1432 * When perf is starting the traced process, all the events
1433 * (apart from group members) have enable_on_exec=1 set,
1434 * so don't spoil it by prematurely enabling them.
1435 */
Andi Kleen6619a532014-01-11 13:38:27 -08001436 if (!target__none(&opts->target) && !opts->initial_delay)
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001437 perf_evlist__enable(rec->evlist);
David Ahern764e16a32011-08-25 10:17:55 -06001438
Peter Zijlstra856e9662009-12-16 17:55:55 +01001439 /*
1440 * Let the child rip
1441 */
Namhyung Kime803cf92015-09-22 09:24:55 +09001442 if (forks) {
Jiri Olsa20a8a3c2018-03-07 16:50:04 +01001443 struct machine *machine = &session->machines.host;
Namhyung Kime5bed562015-09-30 10:45:24 +09001444 union perf_event *event;
Hari Bathinie907caf2017-03-08 02:11:51 +05301445 pid_t tgid;
Namhyung Kime5bed562015-09-30 10:45:24 +09001446
1447 event = malloc(sizeof(event->comm) + machine->id_hdr_size);
1448 if (event == NULL) {
1449 err = -ENOMEM;
1450 goto out_child;
1451 }
1452
Namhyung Kime803cf92015-09-22 09:24:55 +09001453 /*
1454 * Some H/W events are generated before COMM event
1455 * which is emitted during exec(), so perf script
1456 * cannot see a correct process name for those events.
1457 * Synthesize COMM event to prevent it.
1458 */
Hari Bathinie907caf2017-03-08 02:11:51 +05301459 tgid = perf_event__synthesize_comm(tool, event,
1460 rec->evlist->workload.pid,
1461 process_synthesized_event,
1462 machine);
1463 free(event);
1464
1465 if (tgid == -1)
1466 goto out_child;
1467
1468 event = malloc(sizeof(event->namespaces) +
1469 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
1470 machine->id_hdr_size);
1471 if (event == NULL) {
1472 err = -ENOMEM;
1473 goto out_child;
1474 }
1475
1476 /*
1477 * Synthesize NAMESPACES event for the command specified.
1478 */
1479 perf_event__synthesize_namespaces(tool, event,
1480 rec->evlist->workload.pid,
1481 tgid, process_synthesized_event,
1482 machine);
Namhyung Kime5bed562015-09-30 10:45:24 +09001483 free(event);
Namhyung Kime803cf92015-09-22 09:24:55 +09001484
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001485 perf_evlist__start_workload(rec->evlist);
Namhyung Kime803cf92015-09-22 09:24:55 +09001486 }
Peter Zijlstra856e9662009-12-16 17:55:55 +01001487
Andi Kleen6619a532014-01-11 13:38:27 -08001488 if (opts->initial_delay) {
Arnaldo Carvalho de Melo0693e682016-08-08 15:05:46 -03001489 usleep(opts->initial_delay * USEC_PER_MSEC);
Andi Kleen6619a532014-01-11 13:38:27 -08001490 perf_evlist__enable(rec->evlist);
1491 }
1492
Wang Nan5f9cf592016-04-20 18:59:49 +00001493 trigger_ready(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001494 trigger_ready(&switch_output_trigger);
Wang Nana0748652016-11-26 07:03:28 +00001495 perf_hooks__invoke_record_start();
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001496 for (;;) {
Yang Shi9f065192015-09-29 14:49:43 -07001497 unsigned long long hits = rec->samples;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001498
Wang Nan057374642016-07-14 08:34:43 +00001499 /*
1500 * rec->evlist->bkw_mmap_state is possible to be
1501 * BKW_MMAP_EMPTY here: when done == true and
1502 * hits != rec->samples in previous round.
1503 *
1504 * perf_evlist__toggle_bkw_mmap ensure we never
1505 * convert BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
1506 */
1507 if (trigger_is_hit(&switch_output_trigger) || done || draining)
1508 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);
1509
Alexey Budankov470530b2019-03-18 20:40:26 +03001510 if (record__mmap_read_all(rec, false) < 0) {
Wang Nan5f9cf592016-04-20 18:59:49 +00001511 trigger_error(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001512 trigger_error(&switch_output_trigger);
David Ahern8d3eca22012-08-26 12:24:47 -06001513 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001514 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001515 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001516
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001517 if (auxtrace_record__snapshot_started) {
1518 auxtrace_record__snapshot_started = 0;
Wang Nan5f9cf592016-04-20 18:59:49 +00001519 if (!trigger_is_error(&auxtrace_snapshot_trigger))
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001520 record__read_auxtrace_snapshot(rec);
Wang Nan5f9cf592016-04-20 18:59:49 +00001521 if (trigger_is_error(&auxtrace_snapshot_trigger)) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001522 pr_err("AUX area tracing snapshot failed\n");
1523 err = -1;
1524 goto out_child;
1525 }
1526 }
1527
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001528 if (trigger_is_hit(&switch_output_trigger)) {
Wang Nan057374642016-07-14 08:34:43 +00001529 /*
1530 * If switch_output_trigger is hit, the data in
1531 * overwritable ring buffer should have been collected,
1532 * so bkw_mmap_state should be set to BKW_MMAP_EMPTY.
1533 *
1534 * If SIGUSR2 raise after or during record__mmap_read_all(),
1535 * record__mmap_read_all() didn't collect data from
1536 * overwritable ring buffer. Read again.
1537 */
1538 if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
1539 continue;
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001540 trigger_ready(&switch_output_trigger);
1541
Wang Nan057374642016-07-14 08:34:43 +00001542 /*
1543 * Reenable events in overwrite ring buffer after
1544 * record__mmap_read_all(): we should have collected
1545 * data from it.
1546 */
1547 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);
1548
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001549 if (!quiet)
1550 fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
1551 waking);
1552 waking = 0;
1553 fd = record__switch_output(rec, false);
1554 if (fd < 0) {
1555 pr_err("Failed to switch to new file\n");
1556 trigger_error(&switch_output_trigger);
1557 err = fd;
1558 goto out_child;
1559 }
Jiri Olsabfacbe32017-01-09 10:52:00 +01001560
1561 /* re-arm the alarm */
1562 if (rec->switch_output.time)
1563 alarm(rec->switch_output.time);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001564 }
1565
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001566 if (hits == rec->samples) {
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001567 if (done || draining)
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001568 break;
Arnaldo Carvalho de Melof66a8892014-08-18 17:25:59 -03001569 err = perf_evlist__poll(rec->evlist, -1);
Jiri Olsaa5151142014-06-02 13:44:23 -04001570 /*
1571 * Propagate error, only if there's any. Ignore positive
1572 * number of returned events and interrupt error.
1573 */
1574 if (err > 0 || (err < 0 && errno == EINTR))
Namhyung Kim45604712014-05-12 09:47:24 +09001575 err = 0;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001576 waking++;
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001577
1578 if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
1579 draining = true;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001580 }
1581
Jiri Olsa774cb492012-11-12 18:34:01 +01001582 /*
1583 * When perf is starting the traced process, at the end events
1584 * die with the process and we wait for that. Thus no need to
1585 * disable events in this case.
1586 */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001587 if (done && !disabled && !target__none(&opts->target)) {
Wang Nan5f9cf592016-04-20 18:59:49 +00001588 trigger_off(&auxtrace_snapshot_trigger);
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001589 perf_evlist__disable(rec->evlist);
Jiri Olsa27119262012-11-12 18:34:02 +01001590 disabled = true;
1591 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001592 }
Wang Nan5f9cf592016-04-20 18:59:49 +00001593 trigger_off(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001594 trigger_off(&switch_output_trigger);
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001595
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001596 if (forks && workload_exec_errno) {
Masami Hiramatsu35550da2014-08-14 02:22:43 +00001597 char msg[STRERR_BUFSIZE];
Arnaldo Carvalho de Meloc8b5f2c2016-07-06 11:56:20 -03001598 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001599 pr_err("Workload failed: %s\n", emsg);
1600 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001601 goto out_child;
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001602 }
1603
Namhyung Kime3d59112015-01-29 17:06:44 +09001604 if (!quiet)
Namhyung Kim45604712014-05-12 09:47:24 +09001605 fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02001606
Wang Nan4ea648a2016-07-14 08:34:47 +00001607 if (target__none(&rec->opts.target))
1608 record__synthesize_workload(rec, true);
1609
Namhyung Kim45604712014-05-12 09:47:24 +09001610out_child:
Alexey Budankov470530b2019-03-18 20:40:26 +03001611 record__mmap_read_all(rec, true);
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001612 record__aio_mmap_read_sync(rec);
1613
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001614 if (rec->session->bytes_transferred && rec->session->bytes_compressed) {
1615 ratio = (float)rec->session->bytes_transferred/(float)rec->session->bytes_compressed;
1616 session->header.env.comp_ratio = ratio + 0.5;
1617 }
1618
Namhyung Kim45604712014-05-12 09:47:24 +09001619 if (forks) {
1620 int exit_status;
Ingo Molnaraddc2782009-06-02 23:43:11 +02001621
Namhyung Kim45604712014-05-12 09:47:24 +09001622 if (!child_finished)
1623 kill(rec->evlist->workload.pid, SIGTERM);
1624
1625 wait(&exit_status);
1626
1627 if (err < 0)
1628 status = err;
1629 else if (WIFEXITED(exit_status))
1630 status = WEXITSTATUS(exit_status);
1631 else if (WIFSIGNALED(exit_status))
1632 signr = WTERMSIG(exit_status);
1633 } else
1634 status = err;
1635
Wang Nan4ea648a2016-07-14 08:34:47 +00001636 record__synthesize(rec, true);
Namhyung Kime3d59112015-01-29 17:06:44 +09001637 /* this will be recalculated during process_buildids() */
1638 rec->samples = 0;
1639
Wang Nanecfd7a92016-04-13 08:21:07 +00001640 if (!err) {
1641 if (!rec->timestamp_filename) {
1642 record__finish_output(rec);
1643 } else {
1644 fd = record__switch_output(rec, true);
1645 if (fd < 0) {
1646 status = fd;
1647 goto out_delete_session;
1648 }
1649 }
1650 }
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001651
Wang Nana0748652016-11-26 07:03:28 +00001652 perf_hooks__invoke_record_end();
1653
Namhyung Kime3d59112015-01-29 17:06:44 +09001654 if (!err && !quiet) {
1655 char samples[128];
Wang Nanecfd7a92016-04-13 08:21:07 +00001656 const char *postfix = rec->timestamp_filename ?
1657 ".<timestamp>" : "";
Namhyung Kime3d59112015-01-29 17:06:44 +09001658
Adrian Hunteref149c22015-04-09 18:53:45 +03001659 if (rec->samples && !rec->opts.full_auxtrace)
Namhyung Kime3d59112015-01-29 17:06:44 +09001660 scnprintf(samples, sizeof(samples),
1661 " (%" PRIu64 " samples)", rec->samples);
1662 else
1663 samples[0] = '\0';
1664
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001665 fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s",
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001666 perf_data__size(data) / 1024.0 / 1024.0,
Jiri Olsa2d4f2792019-02-21 10:41:30 +01001667 data->path, postfix, samples);
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001668 if (ratio) {
1669 fprintf(stderr, ", compressed (original %.3f MB, ratio is %.3f)",
1670 rec->session->bytes_transferred / 1024.0 / 1024.0,
1671 ratio);
1672 }
1673 fprintf(stderr, " ]\n");
Namhyung Kime3d59112015-01-29 17:06:44 +09001674 }
1675
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001676out_delete_session:
Alexey Budankov5d7f4112019-03-18 20:43:35 +03001677 zstd_fini(&session->zstd_data);
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001678 perf_session__delete(session);
Song Liu657ee552019-03-11 22:30:50 -07001679
1680 if (!opts->no_bpf_event)
1681 perf_evlist__stop_sb_thread(sb_evlist);
Namhyung Kim45604712014-05-12 09:47:24 +09001682 return status;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001683}
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001684
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001685static void callchain_debug(struct callchain_param *callchain)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001686{
Kan Liangaad2b212015-01-05 13:23:04 -05001687 static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
Jiri Olsaa601fdf2014-02-03 12:44:43 +01001688
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001689 pr_debug("callchain: type %s\n", str[callchain->record_mode]);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001690
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001691 if (callchain->record_mode == CALLCHAIN_DWARF)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001692 pr_debug("callchain: stack dump size %d\n",
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001693 callchain->dump_size);
1694}
1695
1696int record_opts__parse_callchain(struct record_opts *record,
1697 struct callchain_param *callchain,
1698 const char *arg, bool unset)
1699{
1700 int ret;
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001701 callchain->enabled = !unset;
1702
1703 /* --no-call-graph */
1704 if (unset) {
1705 callchain->record_mode = CALLCHAIN_NONE;
1706 pr_debug("callchain: disabled\n");
1707 return 0;
1708 }
1709
1710 ret = parse_callchain_record_opt(arg, callchain);
1711 if (!ret) {
1712 /* Enable data address sampling for DWARF unwind. */
1713 if (callchain->record_mode == CALLCHAIN_DWARF)
1714 record->sample_address = true;
1715 callchain_debug(callchain);
1716 }
1717
1718 return ret;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001719}
1720
Kan Liangc421e802015-07-29 05:42:12 -04001721int record_parse_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001722 const char *arg,
1723 int unset)
1724{
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001725 return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
Jiri Olsa26d33022012-08-07 15:20:47 +02001726}
1727
Kan Liangc421e802015-07-29 05:42:12 -04001728int record_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001729 const char *arg __maybe_unused,
1730 int unset __maybe_unused)
1731{
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001732 struct callchain_param *callchain = opt->value;
Kan Liangc421e802015-07-29 05:42:12 -04001733
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001734 callchain->enabled = true;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001735
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001736 if (callchain->record_mode == CALLCHAIN_NONE)
1737 callchain->record_mode = CALLCHAIN_FP;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001738
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001739 callchain_debug(callchain);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001740 return 0;
1741}
1742
Jiri Olsaeb853e82014-02-03 12:44:42 +01001743static int perf_record_config(const char *var, const char *value, void *cb)
1744{
Namhyung Kim7a29c082015-12-15 10:49:56 +09001745 struct record *rec = cb;
1746
1747 if (!strcmp(var, "record.build-id")) {
1748 if (!strcmp(value, "cache"))
1749 rec->no_buildid_cache = false;
1750 else if (!strcmp(value, "no-cache"))
1751 rec->no_buildid_cache = true;
1752 else if (!strcmp(value, "skip"))
1753 rec->no_buildid = true;
1754 else
1755 return -1;
1756 return 0;
1757 }
Yisheng Xiecff17202018-03-12 19:25:57 +08001758 if (!strcmp(var, "record.call-graph")) {
1759 var = "call-graph.record-mode";
1760 return perf_default_config(var, value, cb);
1761 }
Alexey Budankov93f20c02018-11-06 12:07:19 +03001762#ifdef HAVE_AIO_SUPPORT
1763 if (!strcmp(var, "record.aio")) {
1764 rec->opts.nr_cblocks = strtol(value, NULL, 0);
1765 if (!rec->opts.nr_cblocks)
1766 rec->opts.nr_cblocks = nr_cblocks_default;
1767 }
1768#endif
Jiri Olsaeb853e82014-02-03 12:44:42 +01001769
Yisheng Xiecff17202018-03-12 19:25:57 +08001770 return 0;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001771}
1772
/* Maps a user-visible clock name to the corresponding clockid value. */
struct clockid_map {
	const char *name;
	int clockid;
};

/* Initializer for one clockids[] entry. */
#define CLOCKID_MAP(n, c)	\
	{ .name = n, .clockid = (c), }

/* Sentinel entry terminating the clockids[] table. */
#define CLOCKID_END	{ .name = NULL, }


/*
 * Add the missing ones, we need to build on many distros...
 */
#ifndef CLOCK_MONOTONIC_RAW
#define CLOCK_MONOTONIC_RAW 4
#endif
#ifndef CLOCK_BOOTTIME
#define CLOCK_BOOTTIME 7
#endif
#ifndef CLOCK_TAI
#define CLOCK_TAI 11
#endif

/* Name -> clockid table searched (case-insensitively) by parse_clockid(). */
static const struct clockid_map clockids[] = {
	/* available for all events, NMI safe */
	CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
	CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),

	/* available for some events */
	CLOCKID_MAP("realtime", CLOCK_REALTIME),
	CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
	CLOCKID_MAP("tai", CLOCK_TAI),

	/* available for the lazy */
	CLOCKID_MAP("mono", CLOCK_MONOTONIC),
	CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
	CLOCKID_MAP("real", CLOCK_REALTIME),
	CLOCKID_MAP("boot", CLOCK_BOOTTIME),

	CLOCKID_END,
};
1815
Alexey Budankovcf790512018-10-09 17:36:24 +03001816static int get_clockid_res(clockid_t clk_id, u64 *res_ns)
1817{
1818 struct timespec res;
1819
1820 *res_ns = 0;
1821 if (!clock_getres(clk_id, &res))
1822 *res_ns = res.tv_nsec + res.tv_sec * NSEC_PER_SEC;
1823 else
1824 pr_warning("WARNING: Failed to determine specified clock resolution.\n");
1825
1826 return 0;
1827}
1828
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001829static int parse_clockid(const struct option *opt, const char *str, int unset)
1830{
1831 struct record_opts *opts = (struct record_opts *)opt->value;
1832 const struct clockid_map *cm;
1833 const char *ostr = str;
1834
1835 if (unset) {
1836 opts->use_clockid = 0;
1837 return 0;
1838 }
1839
1840 /* no arg passed */
1841 if (!str)
1842 return 0;
1843
1844 /* no setting it twice */
1845 if (opts->use_clockid)
1846 return -1;
1847
1848 opts->use_clockid = true;
1849
1850 /* if its a number, we're done */
1851 if (sscanf(str, "%d", &opts->clockid) == 1)
Alexey Budankovcf790512018-10-09 17:36:24 +03001852 return get_clockid_res(opts->clockid, &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001853
1854 /* allow a "CLOCK_" prefix to the name */
1855 if (!strncasecmp(str, "CLOCK_", 6))
1856 str += 6;
1857
1858 for (cm = clockids; cm->name; cm++) {
1859 if (!strcasecmp(str, cm->name)) {
1860 opts->clockid = cm->clockid;
Alexey Budankovcf790512018-10-09 17:36:24 +03001861 return get_clockid_res(opts->clockid,
1862 &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001863 }
1864 }
1865
1866 opts->use_clockid = false;
1867 ui__warning("unknown clockid %s, check man page\n", ostr);
1868 return -1;
1869}
1870
Alexey Budankovf4fe11b2019-01-22 20:52:03 +03001871static int record__parse_affinity(const struct option *opt, const char *str, int unset)
1872{
1873 struct record_opts *opts = (struct record_opts *)opt->value;
1874
1875 if (unset || !str)
1876 return 0;
1877
1878 if (!strcasecmp(str, "node"))
1879 opts->affinity = PERF_AFFINITY_NODE;
1880 else if (!strcasecmp(str, "cpu"))
1881 opts->affinity = PERF_AFFINITY_CPU;
1882
1883 return 0;
1884}
1885
Adrian Huntere9db1312015-04-09 18:53:46 +03001886static int record__parse_mmap_pages(const struct option *opt,
1887 const char *str,
1888 int unset __maybe_unused)
1889{
1890 struct record_opts *opts = opt->value;
1891 char *s, *p;
1892 unsigned int mmap_pages;
1893 int ret;
1894
1895 if (!str)
1896 return -EINVAL;
1897
1898 s = strdup(str);
1899 if (!s)
1900 return -ENOMEM;
1901
1902 p = strchr(s, ',');
1903 if (p)
1904 *p = '\0';
1905
1906 if (*s) {
1907 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
1908 if (ret)
1909 goto out_free;
1910 opts->mmap_pages = mmap_pages;
1911 }
1912
1913 if (!p) {
1914 ret = 0;
1915 goto out_free;
1916 }
1917
1918 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
1919 if (ret)
1920 goto out_free;
1921
1922 opts->auxtrace_mmap_pages = mmap_pages;
1923
1924out_free:
1925 free(s);
1926 return ret;
1927}
1928
Jiri Olsa0c582442017-01-09 10:51:59 +01001929static void switch_output_size_warn(struct record *rec)
1930{
1931 u64 wakeup_size = perf_evlist__mmap_size(rec->opts.mmap_pages);
1932 struct switch_output *s = &rec->switch_output;
1933
1934 wakeup_size /= 2;
1935
1936 if (s->size < wakeup_size) {
1937 char buf[100];
1938
1939 unit_number__scnprintf(buf, sizeof(buf), wakeup_size);
1940 pr_warning("WARNING: switch-output data size lower than "
1941 "wakeup kernel buffer size (%s) "
1942 "expect bigger perf.data sizes\n", buf);
1943 }
1944}
1945
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001946static int switch_output_setup(struct record *rec)
1947{
1948 struct switch_output *s = &rec->switch_output;
Jiri Olsadc0c6122017-01-09 10:51:58 +01001949 static struct parse_tag tags_size[] = {
1950 { .tag = 'B', .mult = 1 },
1951 { .tag = 'K', .mult = 1 << 10 },
1952 { .tag = 'M', .mult = 1 << 20 },
1953 { .tag = 'G', .mult = 1 << 30 },
1954 { .tag = 0 },
1955 };
Jiri Olsabfacbe32017-01-09 10:52:00 +01001956 static struct parse_tag tags_time[] = {
1957 { .tag = 's', .mult = 1 },
1958 { .tag = 'm', .mult = 60 },
1959 { .tag = 'h', .mult = 60*60 },
1960 { .tag = 'd', .mult = 60*60*24 },
1961 { .tag = 0 },
1962 };
Jiri Olsadc0c6122017-01-09 10:51:58 +01001963 unsigned long val;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001964
1965 if (!s->set)
1966 return 0;
1967
1968 if (!strcmp(s->str, "signal")) {
1969 s->signal = true;
1970 pr_debug("switch-output with SIGUSR2 signal\n");
Jiri Olsadc0c6122017-01-09 10:51:58 +01001971 goto enabled;
1972 }
1973
1974 val = parse_tag_value(s->str, tags_size);
1975 if (val != (unsigned long) -1) {
1976 s->size = val;
1977 pr_debug("switch-output with %s size threshold\n", s->str);
1978 goto enabled;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001979 }
1980
Jiri Olsabfacbe32017-01-09 10:52:00 +01001981 val = parse_tag_value(s->str, tags_time);
1982 if (val != (unsigned long) -1) {
1983 s->time = val;
1984 pr_debug("switch-output with %s time threshold (%lu seconds)\n",
1985 s->str, s->time);
1986 goto enabled;
1987 }
1988
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001989 return -1;
Jiri Olsadc0c6122017-01-09 10:51:58 +01001990
1991enabled:
1992 rec->timestamp_filename = true;
1993 s->enabled = true;
Jiri Olsa0c582442017-01-09 10:51:59 +01001994
1995 if (s->size && !rec->opts.no_buffering)
1996 switch_output_size_warn(rec);
1997
Jiri Olsadc0c6122017-01-09 10:51:58 +01001998 return 0;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001999}
2000
/* Usage lines shown by 'perf record -h' (NULL-terminated). */
static const char * const __record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};
/* Non-static so other builtins reusing the record options can print it. */
const char * const *record_usage = __record_usage;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002007
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002008/*
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03002009 * XXX Ideally would be local to cmd_record() and passed to a record__new
2010 * because we need to have access to it in record__exit, that is called
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002011 * after cmd_record() exits, but since record_options need to be accessible to
2012 * builtin-script, leave it here.
2013 *
2014 * At least we don't ouch it in all the other functions here directly.
2015 *
2016 * Just say no to tons of global variables, sigh.
2017 */
/* Global record state; see the comment above for why it is a global. */
static struct record record = {
	.opts = {
		.sample_time	     = true,
		.mmap_pages	     = UINT_MAX,	/* unset; resolved later */
		.user_freq	     = UINT_MAX,	/* unset; resolved later */
		.user_interval	     = ULLONG_MAX,	/* unset; resolved later */
		.freq		     = 4000,		/* default sampling frequency */
		.target		     = {
			.uses_mmap   = true,
			.default_per_cpu = true,
		},
		.mmap_flush          = MMAP_FLUSH_DEFAULT,
	},
	/* Per-event-type handlers used when processing recorded data. */
	.tool = {
		.sample		= process_sample_event,
		.fork		= perf_event__process_fork,
		.exit		= perf_event__process_exit,
		.comm		= perf_event__process_comm,
		.namespaces	= perf_event__process_namespaces,
		.mmap		= perf_event__process_mmap,
		.mmap2		= perf_event__process_mmap2,
		.ordered_events	= true,
	},
};
Frederic Weisbecker7865e812010-04-14 19:42:07 +02002042
/* --call-graph help text; CALLCHAIN_RECORD_HELP is defined elsewhere. */
const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
	"\n\t\t\t\tDefault: fp";

/* NOTE(review): presumably set by a --dry-run option outside this view — verify. */
static bool dry_run;
2047
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002048/*
2049 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
2050 * with it and switch to use the library functions in perf_evlist that came
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03002051 * from builtin-record.c, i.e. use record_opts,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002052 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
2053 * using pipes, etc.
2054 */
Jiri Olsaefd21302017-01-03 09:19:55 +01002055static struct option __record_options[] = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002056 OPT_CALLBACK('e', "event", &record.evlist, "event",
Thomas Gleixner86847b62009-06-06 12:24:17 +02002057 "event selector. use 'perf list' to list available events",
Jiri Olsaf120f9d2011-07-14 11:25:32 +02002058 parse_events_option),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002059 OPT_CALLBACK(0, "filter", &record.evlist, "filter",
Li Zefanc171b552009-10-15 11:22:07 +08002060 "event filter", parse_filter),
Wang Nan4ba1faa2015-07-10 07:36:10 +00002061 OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
2062 NULL, "don't record events from perf itself",
2063 exclude_perf),
Namhyung Kimbea03402012-04-26 14:15:15 +09002064 OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03002065 "record events on existing process id"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002066 OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03002067 "record events on existing thread id"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002068 OPT_INTEGER('r', "realtime", &record.realtime_prio,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002069 "collect data with this RT SCHED_FIFO priority"),
Arnaldo Carvalho de Melo509051e2014-01-14 17:52:14 -03002070 OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
Kirill Smelkovacac03f2011-01-12 17:59:36 +03002071 "collect data without buffering"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002072 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
Frederic Weisbeckerdaac07b2009-08-13 10:27:19 +02002073 "collect raw sample records from all opened counters"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002074 OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002075 "system-wide collection from all CPUs"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002076 OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
Stephane Eranianc45c6ea2010-05-28 12:00:01 +02002077 "list of cpus to monitor"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002078 OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
Jiri Olsa2d4f2792019-02-21 10:41:30 +01002079 OPT_STRING('o', "output", &record.data.path, "file",
Ingo Molnarabaff322009-06-02 22:59:57 +02002080 "output file name"),
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02002081 OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
2082 &record.opts.no_inherit_set,
2083 "child tasks do not inherit counters"),
Wang Nan4ea648a2016-07-14 08:34:47 +00002084 OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
2085 "synthesize non-sample events at the end of output"),
Wang Nan626a6b72016-07-14 08:34:45 +00002086 OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
Song Liu71184c62019-03-11 22:30:37 -07002087 OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "record bpf events"),
Arnaldo Carvalho de Melob09c2362018-03-01 14:52:50 -03002088 OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
2089 "Fail if the specified frequency can't be used"),
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03002090 OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
2091 "profile at this frequency",
2092 record__parse_freq),
Adrian Huntere9db1312015-04-09 18:53:46 +03002093 OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
2094 "number of mmap data pages and AUX area tracing mmap pages",
2095 record__parse_mmap_pages),
Alexey Budankov470530b2019-03-18 20:40:26 +03002096 OPT_CALLBACK(0, "mmap-flush", &record.opts, "number",
2097 "Minimal number of bytes that is extracted from mmap data pages (default: 1)",
2098 record__mmap_flush_parse),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002099 OPT_BOOLEAN(0, "group", &record.opts.group,
Lin Ming43bece72011-08-17 18:42:07 +08002100 "put the counters into a counter group"),
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03002101 OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002102 NULL, "enables call-graph recording" ,
2103 &record_callchain_opt),
2104 OPT_CALLBACK(0, "call-graph", &record.opts,
Namhyung Kim76a26542015-10-22 23:28:32 +09002105 "record_mode[,record_size]", record_callchain_help,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002106 &record_parse_callchain_opt),
Ian Munsiec0555642010-04-13 18:37:33 +10002107 OPT_INCR('v', "verbose", &verbose,
Ingo Molnar3da297a2009-06-07 17:39:02 +02002108 "be more verbose (show counter open errors, etc)"),
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02002109 OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002110 OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02002111 "per thread counts"),
Peter Zijlstra56100322015-06-10 16:48:50 +02002112 OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
Kan Liang3b0a5da2017-08-29 13:11:08 -04002113 OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
2114 "Record the sample physical addresses"),
Jiri Olsab6f35ed2016-08-01 20:02:35 +02002115 OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
Adrian Hunter3abebc52015-07-06 14:51:01 +03002116 OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
2117 &record.opts.sample_time_set,
2118 "Record the sample timestamps"),
Jiri Olsaf290aa12018-02-01 09:38:11 +01002119 OPT_BOOLEAN_SET('P', "period", &record.opts.period, &record.opts.period_set,
2120 "Record the sample period"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002121 OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02002122 "don't sample"),
Wang Nand2db9a92016-01-25 09:56:19 +00002123 OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
2124 &record.no_buildid_cache_set,
2125 "do not update the buildid cache"),
2126 OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
2127 &record.no_buildid_set,
2128 "do not collect buildids in perf.data"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002129 OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
Stephane Eranian023695d2011-02-14 11:20:01 +02002130 "monitor event in cgroup name only",
2131 parse_cgroups),
Arnaldo Carvalho de Meloa6205a32014-01-14 17:58:12 -03002132 OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
Andi Kleen6619a532014-01-11 13:38:27 -08002133 "ms to wait before starting measurement after program start"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002134 OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
2135 "user to profile"),
Stephane Eraniana5aabda2012-03-08 23:47:45 +01002136
2137 OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
2138 "branch any", "sample any taken branches",
2139 parse_branch_stack),
2140
2141 OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
2142 "branch filter mask", "branch stack filter modes",
Roberto Agostino Vitillobdfebd82012-02-09 23:21:02 +01002143 parse_branch_stack),
Andi Kleen05484292013-01-24 16:10:29 +01002144 OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
2145 "sample by weight (on special events only)"),
Andi Kleen475eeab2013-09-20 07:40:43 -07002146 OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
2147 "sample transaction flags (special events only)"),
Adrian Hunter3aa59392013-11-15 15:52:29 +02002148 OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
2149 "use per-thread mmaps"),
Stephane Eranianbcc84ec2015-08-31 18:41:12 +02002150 OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
2151 "sample selected machine registers on interrupt,"
Arnaldo Carvalho de Melo8e5bc762019-05-13 15:55:01 -03002152 " use '-I?' to list register names", parse_regs),
Andi Kleen84c41742017-09-05 10:00:28 -07002153 OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
2154 "sample selected machine registers on interrupt,"
Arnaldo Carvalho de Melo8e5bc762019-05-13 15:55:01 -03002155 " use '-I?' to list register names", parse_regs),
Andi Kleen85c273d2015-02-24 15:13:40 -08002156 OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
2157 "Record running/enabled time of read (:S) events"),
Peter Zijlstra814c8c32015-03-31 00:19:31 +02002158 OPT_CALLBACK('k', "clockid", &record.opts,
2159 "clockid", "clockid to use for events, see clock_gettime()",
2160 parse_clockid),
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002161 OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
2162 "opts", "AUX area tracing Snapshot Mode", ""),
Mark Drayton3fcb10e2018-12-04 12:34:20 -08002163 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
Kan Liang9d9cad72015-06-17 09:51:11 -04002164 "per thread proc mmap processing timeout in ms"),
Hari Bathinif3b36142017-03-08 02:11:43 +05302165 OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
2166 "Record namespaces events"),
Adrian Hunterb757bb02015-07-21 12:44:04 +03002167 OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
2168 "Record context switch events"),
Jiri Olsa85723882016-02-15 09:34:31 +01002169 OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
2170 "Configure all used events to run in kernel space.",
2171 PARSE_OPT_EXCLUSIVE),
2172 OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
2173 "Configure all used events to run in user space.",
2174 PARSE_OPT_EXCLUSIVE),
Wang Nan71dc23262015-10-14 12:41:19 +00002175 OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
2176 "clang binary to use for compiling BPF scriptlets"),
2177 OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
2178 "options passed to clang when compiling BPF scriptlets"),
He Kuang7efe0e02015-12-14 10:39:23 +00002179 OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
2180 "file", "vmlinux pathname"),
Namhyung Kim61566812016-01-11 22:37:09 +09002181 OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
2182 "Record build-id of all DSOs regardless of hits"),
Wang Nanecfd7a92016-04-13 08:21:07 +00002183 OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
2184 "append timestamp to output filename"),
Jin Yao68588ba2017-12-08 21:13:42 +08002185 OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
2186 "Record timestamp boundary (time of first/last samples)"),
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002187 OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
Andi Kleenc38dab72019-03-14 15:49:56 -07002188 &record.switch_output.set, "signal or size[BKMG] or time[smhd]",
2189 "Switch output when receiving SIGUSR2 (signal) or cross a size or time threshold",
Jiri Olsadc0c6122017-01-09 10:51:58 +01002190 "signal"),
Andi Kleen03724b22019-03-14 15:49:55 -07002191 OPT_INTEGER(0, "switch-max-files", &record.switch_output.num_files,
2192 "Limit number of switch output generated files"),
Wang Nan0aab2132016-06-16 08:02:41 +00002193 OPT_BOOLEAN(0, "dry-run", &dry_run,
2194 "Parse options then exit"),
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002195#ifdef HAVE_AIO_SUPPORT
Alexey Budankov93f20c02018-11-06 12:07:19 +03002196 OPT_CALLBACK_OPTARG(0, "aio", &record.opts,
2197 &nr_cblocks_default, "n", "Use <n> control blocks in asynchronous trace writing mode (default: 1, max: 4)",
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002198 record__aio_parse),
2199#endif
Alexey Budankovf4fe11b2019-01-22 20:52:03 +03002200 OPT_CALLBACK(0, "affinity", &record.opts, "node|cpu",
2201 "Set affinity mask of trace reading thread to NUMA node cpu mask or cpu of processed mmap buffer",
2202 record__parse_affinity),
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002203 OPT_END()
2204};
2205
/*
 * Public handle to the option table defined above (__record_options);
 * exposed as a pointer so other code (e.g. usage/error paths) can
 * reference the 'perf record' options without seeing the array itself.
 */
struct option *record_options = __record_options;
2207
Arnaldo Carvalho de Melob0ad8ea2017-03-27 11:47:20 -03002208int cmd_record(int argc, const char **argv)
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002209{
Adrian Hunteref149c22015-04-09 18:53:45 +03002210 int err;
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03002211 struct record *rec = &record;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002212 char errbuf[BUFSIZ];
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002213
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03002214 setlocale(LC_ALL, "");
2215
Wang Nan48e1cab2015-12-14 10:39:22 +00002216#ifndef HAVE_LIBBPF_SUPPORT
2217# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
2218 set_nobuild('\0', "clang-path", true);
2219 set_nobuild('\0', "clang-opt", true);
2220# undef set_nobuild
2221#endif
2222
He Kuang7efe0e02015-12-14 10:39:23 +00002223#ifndef HAVE_BPF_PROLOGUE
2224# if !defined (HAVE_DWARF_SUPPORT)
2225# define REASON "NO_DWARF=1"
2226# elif !defined (HAVE_LIBBPF_SUPPORT)
2227# define REASON "NO_LIBBPF=1"
2228# else
2229# define REASON "this architecture doesn't support BPF prologue"
2230# endif
2231# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
2232 set_nobuild('\0', "vmlinux", true);
2233# undef set_nobuild
2234# undef REASON
2235#endif
2236
Alexey Budankov9d2ed642019-01-22 20:47:43 +03002237 CPU_ZERO(&rec->affinity_mask);
2238 rec->opts.affinity = PERF_AFFINITY_SYS;
2239
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03002240 rec->evlist = perf_evlist__new();
2241 if (rec->evlist == NULL)
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -02002242 return -ENOMEM;
2243
Arnaldo Carvalho de Meloecc4c562017-01-24 13:44:10 -03002244 err = perf_config(perf_record_config, rec);
2245 if (err)
2246 return err;
Jiri Olsaeb853e82014-02-03 12:44:42 +01002247
Tom Zanussibca647a2010-11-10 08:11:30 -06002248 argc = parse_options(argc, argv, record_options, record_usage,
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02002249 PARSE_OPT_STOP_AT_NON_OPTION);
Namhyung Kim68ba3232017-02-17 17:17:42 +09002250 if (quiet)
2251 perf_quiet_option();
Jiri Olsa483635a2017-02-17 18:00:18 +01002252
2253 /* Make system wide (-a) the default target. */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002254 if (!argc && target__none(&rec->opts.target))
Jiri Olsa483635a2017-02-17 18:00:18 +01002255 rec->opts.target.system_wide = true;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002256
Namhyung Kimbea03402012-04-26 14:15:15 +09002257 if (nr_cgroups && !rec->opts.target.system_wide) {
Namhyung Kimc7118362015-10-25 00:49:27 +09002258 usage_with_options_msg(record_usage, record_options,
2259 "cgroup monitoring only available in system-wide mode");
2260
Stephane Eranian023695d2011-02-14 11:20:01 +02002261 }
Adrian Hunterb757bb02015-07-21 12:44:04 +03002262 if (rec->opts.record_switch_events &&
2263 !perf_can_record_switch_events()) {
Namhyung Kimc7118362015-10-25 00:49:27 +09002264 ui__error("kernel does not support recording context switch events\n");
2265 parse_options_usage(record_usage, record_options, "switch-events", 0);
2266 return -EINVAL;
Adrian Hunterb757bb02015-07-21 12:44:04 +03002267 }
Stephane Eranian023695d2011-02-14 11:20:01 +02002268
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002269 if (switch_output_setup(rec)) {
2270 parse_options_usage(record_usage, record_options, "switch-output", 0);
2271 return -EINVAL;
2272 }
2273
Jiri Olsabfacbe32017-01-09 10:52:00 +01002274 if (rec->switch_output.time) {
2275 signal(SIGALRM, alarm_sig_handler);
2276 alarm(rec->switch_output.time);
2277 }
2278
Andi Kleen03724b22019-03-14 15:49:55 -07002279 if (rec->switch_output.num_files) {
2280 rec->switch_output.filenames = calloc(sizeof(char *),
2281 rec->switch_output.num_files);
2282 if (!rec->switch_output.filenames)
2283 return -EINVAL;
2284 }
2285
Adrian Hunter1b36c032016-09-23 17:38:39 +03002286 /*
2287 * Allow aliases to facilitate the lookup of symbols for address
2288 * filters. Refer to auxtrace_parse_filters().
2289 */
2290 symbol_conf.allow_aliases = true;
2291
2292 symbol__init(NULL);
2293
Adrian Hunter4b5ea3b2018-03-06 11:13:12 +02002294 err = record__auxtrace_init(rec);
Adrian Hunter1b36c032016-09-23 17:38:39 +03002295 if (err)
2296 goto out;
2297
Wang Nan0aab2132016-06-16 08:02:41 +00002298 if (dry_run)
Adrian Hunter5c01ad602016-09-23 17:38:37 +03002299 goto out;
Wang Nan0aab2132016-06-16 08:02:41 +00002300
Wang Nand7888572016-04-08 15:07:24 +00002301 err = bpf__setup_stdout(rec->evlist);
2302 if (err) {
2303 bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
2304 pr_err("ERROR: Setup BPF stdout failed: %s\n",
2305 errbuf);
Adrian Hunter5c01ad602016-09-23 17:38:37 +03002306 goto out;
Wang Nand7888572016-04-08 15:07:24 +00002307 }
2308
Adrian Hunteref149c22015-04-09 18:53:45 +03002309 err = -ENOMEM;
2310
Arnaldo Carvalho de Melo6c443952017-11-14 11:03:19 -03002311 if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(rec->evlist))
Arnaldo Carvalho de Melo646aaea2011-05-27 11:00:41 -03002312 pr_warning(
2313"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
2314"check /proc/sys/kernel/kptr_restrict.\n\n"
2315"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
2316"file is not found in the buildid cache or in the vmlinux path.\n\n"
2317"Samples in kernel modules won't be resolved at all.\n\n"
2318"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
2319"even with a suitable vmlinux or kallsyms file.\n\n");
Arnaldo Carvalho de Meloec80fde2011-05-26 09:53:51 -03002320
Wang Nan0c1d46a2016-04-20 18:59:52 +00002321 if (rec->no_buildid_cache || rec->no_buildid) {
Stephane Eraniana1ac1d32010-06-17 11:39:01 +02002322 disable_buildid_cache();
Jiri Olsadc0c6122017-01-09 10:51:58 +01002323 } else if (rec->switch_output.enabled) {
Wang Nan0c1d46a2016-04-20 18:59:52 +00002324 /*
2325 * In 'perf record --switch-output', disable buildid
2326 * generation by default to reduce data file switching
2327 * overhead. Still generate buildid if they are required
2328 * explicitly using
2329 *
Jiri Olsa60437ac2017-01-03 09:19:56 +01002330 * perf record --switch-output --no-no-buildid \
Wang Nan0c1d46a2016-04-20 18:59:52 +00002331 * --no-no-buildid-cache
2332 *
2333 * Following code equals to:
2334 *
2335 * if ((rec->no_buildid || !rec->no_buildid_set) &&
2336 * (rec->no_buildid_cache || !rec->no_buildid_cache_set))
2337 * disable_buildid_cache();
2338 */
2339 bool disable = true;
2340
2341 if (rec->no_buildid_set && !rec->no_buildid)
2342 disable = false;
2343 if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
2344 disable = false;
2345 if (disable) {
2346 rec->no_buildid = true;
2347 rec->no_buildid_cache = true;
2348 disable_buildid_cache();
2349 }
2350 }
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02002351
Wang Nan4ea648a2016-07-14 08:34:47 +00002352 if (record.opts.overwrite)
2353 record.opts.tail_synthesize = true;
2354
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03002355 if (rec->evlist->nr_entries == 0 &&
Arnaldo Carvalho de Melo4b4cd502017-07-03 13:26:32 -03002356 __perf_evlist__add_default(rec->evlist, !record.opts.no_samples) < 0) {
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02002357 pr_err("Not enough memory for event selector list\n");
Adrian Hunter394c01e2016-09-23 17:38:36 +03002358 goto out;
Peter Zijlstrabbd36e52009-06-11 23:11:50 +02002359 }
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002360
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02002361 if (rec->opts.target.tid && !rec->opts.no_inherit_set)
2362 rec->opts.no_inherit = true;
2363
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002364 err = target__validate(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002365 if (err) {
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002366 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Jiri Olsac3dec272018-02-06 19:17:58 +01002367 ui__warning("%s\n", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002368 }
Namhyung Kim4bd0f2d2012-04-26 14:15:18 +09002369
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002370 err = target__parse_uid(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002371 if (err) {
2372 int saved_errno = errno;
2373
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002374 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Namhyung Kim3780f482012-05-29 13:22:57 +09002375 ui__error("%s", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002376
2377 err = -saved_errno;
Adrian Hunter394c01e2016-09-23 17:38:36 +03002378 goto out;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002379 }
Arnaldo Carvalho de Melo0d37aa32012-01-19 14:08:15 -02002380
Mengting Zhangca800062017-12-13 15:01:53 +08002381 /* Enable ignoring missing threads when -u/-p option is defined. */
2382 rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;
Jiri Olsa23dc4f12016-12-12 11:35:43 +01002383
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002384 err = -ENOMEM;
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03002385 if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
Arnaldo Carvalho de Melodd7927f2011-01-12 14:28:51 -02002386 usage_with_options(record_usage, record_options);
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02002387
Adrian Hunteref149c22015-04-09 18:53:45 +03002388 err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
2389 if (err)
Adrian Hunter394c01e2016-09-23 17:38:36 +03002390 goto out;
Adrian Hunteref149c22015-04-09 18:53:45 +03002391
Namhyung Kim61566812016-01-11 22:37:09 +09002392 /*
2393 * We take all buildids when the file contains
2394 * AUX area tracing data because we do not decode the
2395 * trace because it would take too long.
2396 */
2397 if (rec->opts.full_auxtrace)
2398 rec->buildid_all = true;
2399
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03002400 if (record_opts__config(&rec->opts)) {
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03002401 err = -EINVAL;
Adrian Hunter394c01e2016-09-23 17:38:36 +03002402 goto out;
Mike Galbraith7e4ff9e2009-10-12 07:56:03 +02002403 }
2404
Alexey Budankov93f20c02018-11-06 12:07:19 +03002405 if (rec->opts.nr_cblocks > nr_cblocks_max)
2406 rec->opts.nr_cblocks = nr_cblocks_max;
Alexey Budankov5d7f4112019-03-18 20:43:35 +03002407 pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks);
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002408
Alexey Budankov9d2ed642019-01-22 20:47:43 +03002409 pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
Alexey Budankov470530b2019-03-18 20:40:26 +03002410 pr_debug("mmap flush: %d\n", rec->opts.mmap_flush);
Alexey Budankov9d2ed642019-01-22 20:47:43 +03002411
Alexey Budankov51255a82019-03-18 20:42:19 +03002412 if (rec->opts.comp_level > comp_level_max)
2413 rec->opts.comp_level = comp_level_max;
2414 pr_debug("comp level: %d\n", rec->opts.comp_level);
2415
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002416 err = __cmd_record(&record, argc, argv);
Adrian Hunter394c01e2016-09-23 17:38:36 +03002417out:
Namhyung Kim45604712014-05-12 09:47:24 +09002418 perf_evlist__delete(rec->evlist);
Arnaldo Carvalho de Melod65a4582010-07-30 18:31:28 -03002419 symbol__exit();
Adrian Hunteref149c22015-04-09 18:53:45 +03002420 auxtrace_record__free(rec->itr);
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03002421 return err;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002422}
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002423
2424static void snapshot_sig_handler(int sig __maybe_unused)
2425{
Jiri Olsadc0c6122017-01-09 10:51:58 +01002426 struct record *rec = &record;
2427
Wang Nan5f9cf592016-04-20 18:59:49 +00002428 if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
2429 trigger_hit(&auxtrace_snapshot_trigger);
2430 auxtrace_record__snapshot_started = 1;
2431 if (auxtrace_record__snapshot_start(record.itr))
2432 trigger_error(&auxtrace_snapshot_trigger);
2433 }
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002434
Jiri Olsadc0c6122017-01-09 10:51:58 +01002435 if (switch_output_signal(rec))
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002436 trigger_hit(&switch_output_trigger);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002437}
Jiri Olsabfacbe32017-01-09 10:52:00 +01002438
2439static void alarm_sig_handler(int sig __maybe_unused)
2440{
2441 struct record *rec = &record;
2442
2443 if (switch_output_time(rec))
2444 trigger_hit(&switch_output_trigger);
2445}