// SPDX-License-Identifier: GPL-2.0
/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "util/build-id.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/config.h"

#include "util/callchain.h"
#include "util/cgroup.h"
#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/target.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/record.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"
#include "util/perf_regs.h"
#include "util/auxtrace.h"
#include "util/tsc.h"
#include "util/parse-branch-options.h"
#include "util/parse-regs-options.h"
#include "util/llvm-utils.h"
#include "util/bpf-loader.h"
#include "util/trigger.h"
#include "util/perf-hooks.h"
#include "util/cpu-set-sched.h"
#include "util/time-utils.h"
#include "util/units.h"
#include "util/bpf-event.h"
#include "asm/bug.h"
#include "perf.h"

#include <errno.h>
#include <inttypes.h>
#include <locale.h>
#include <poll.h>
#include <unistd.h>
#include <sched.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
struct switch_output {
	bool		 enabled;
	bool		 signal;
	unsigned long	 size;
	unsigned long	 time;
	const char	*str;
	bool		 set;
	char		 **filenames;
	int		 num_files;
	int		 cur_file;
};

struct record {
	struct perf_tool	tool;
	struct record_opts	opts;
	u64			bytes_written;
	struct perf_data	data;
	struct auxtrace_record	*itr;
	struct evlist		*evlist;
	struct perf_session	*session;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_set;
	bool			no_buildid_cache;
	bool			no_buildid_cache_set;
	bool			buildid_all;
	bool			timestamp_filename;
	bool			timestamp_boundary;
	struct switch_output	switch_output;
	unsigned long long	samples;
	cpu_set_t		affinity_mask;
};

static volatile int auxtrace_record__snapshot_started;
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
static DEFINE_TRIGGER(switch_output_trigger);

static const char *affinity_tags[PERF_AFFINITY_MAX] = {
	"SYS", "NODE", "CPU"
};

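/*
 * The triggers above coordinate asynchronous requests (SIGUSR2, the
 * alarm timer, or the output size threshold checked in record__write())
 * with the main recording loop: a signal handler or size check "hits" a
 * trigger, and the loop later acts on it at a safe point.
 */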
static bool switch_output_signal(struct record *rec)
{
	return rec->switch_output.signal &&
	       trigger_is_ready(&switch_output_trigger);
}

static bool switch_output_size(struct record *rec)
{
	return rec->switch_output.size &&
	       trigger_is_ready(&switch_output_trigger) &&
	       (rec->bytes_written >= rec->switch_output.size);
}

static bool switch_output_time(struct record *rec)
{
	return rec->switch_output.time &&
	       trigger_is_ready(&switch_output_trigger);
}

static int record__write(struct record *rec, struct perf_mmap *map __maybe_unused,
			 void *bf, size_t size)
{
	struct perf_data_file *file = &rec->session->data->file;

	if (perf_data_file__write(file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	rec->bytes_written += size;

	if (switch_output_size(rec))
		trigger_hit(&switch_output_trigger);

	return 0;
}

static int record__aio_enabled(struct record *rec);
static int record__comp_enabled(struct record *rec);
static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
			    void *src, size_t src_size);

#ifdef HAVE_AIO_SUPPORT
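/*
 * Overview of the AIO path below: each mmap owns map->aio.data[]
 * buffers and matching control blocks (cblocks). Kernel buffer contents
 * are copied (optionally compressed) into a free aio buffer and queued
 * with aio_write(); completions are reaped in record__aio_complete()
 * and waited for in record__aio_sync().
 */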
static int record__aio_write(struct aiocb *cblock, int trace_fd,
			     void *buf, size_t size, off_t off)
{
	int rc;

	cblock->aio_fildes = trace_fd;
	cblock->aio_buf = buf;
	cblock->aio_nbytes = size;
	cblock->aio_offset = off;
	cblock->aio_sigevent.sigev_notify = SIGEV_NONE;

	do {
		rc = aio_write(cblock);
		if (rc == 0) {
			break;
		} else if (errno != EAGAIN) {
			cblock->aio_fildes = -1;
			pr_err("failed to queue perf data, error: %m\n");
			break;
		}
	} while (1);

	return rc;
}

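/*
 * Reap a previously queued aio write: returns 0 while the request is
 * still in flight or has been restarted with the remaining bytes, and
 * 1 once the whole chunk has been written and the mmap reference can
 * be dropped.
 */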
static int record__aio_complete(struct perf_mmap *md, struct aiocb *cblock)
{
	void *rem_buf;
	off_t rem_off;
	size_t rem_size;
	int rc, aio_errno;
	ssize_t aio_ret, written;

	aio_errno = aio_error(cblock);
	if (aio_errno == EINPROGRESS)
		return 0;

	written = aio_ret = aio_return(cblock);
	if (aio_ret < 0) {
		if (aio_errno != EINTR)
			pr_err("failed to write perf data, error: %m\n");
		written = 0;
	}

	rem_size = cblock->aio_nbytes - written;

	if (rem_size == 0) {
		cblock->aio_fildes = -1;
		/*
		 * md->refcount is incremented in record__aio_pushfn() for
		 * every aio write request started in record__aio_push(), so
		 * decrement it because the request is now complete.
		 */
		perf_mmap__put(md);
		rc = 1;
	} else {
		/*
		 * The aio write request may have to be restarted with the
		 * remainder if the kernel didn't write the whole chunk at
		 * once.
		 */
		rem_off = cblock->aio_offset + written;
		rem_buf = (void *)(cblock->aio_buf + written);
		record__aio_write(cblock, cblock->aio_fildes,
				  rem_buf, rem_size, rem_off);
		rc = 0;
	}

	return rc;
}

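/*
 * Wait for queued aio writes: with sync_all, block until every control
 * block has completed; otherwise return the index of the first free
 * control block so its map->aio.data[] buffer can be reused.
 */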
static int record__aio_sync(struct perf_mmap *md, bool sync_all)
{
	struct aiocb **aiocb = md->aio.aiocb;
	struct aiocb *cblocks = md->aio.cblocks;
	struct timespec timeout = { 0, 1000 * 1000 * 1 }; /* 1ms */
	int i, do_suspend;

	do {
		do_suspend = 0;
		for (i = 0; i < md->aio.nr_cblocks; ++i) {
			if (cblocks[i].aio_fildes == -1 || record__aio_complete(md, &cblocks[i])) {
				if (sync_all)
					aiocb[i] = NULL;
				else
					return i;
			} else {
				/*
				 * The started aio write is not complete yet,
				 * so it has to be waited on before the next
				 * allocation.
				 */
				aiocb[i] = &cblocks[i];
				do_suspend = 1;
			}
		}
		if (!do_suspend)
			return -1;

		while (aio_suspend((const struct aiocb **)aiocb, md->aio.nr_cblocks, &timeout)) {
			if (!(errno == EAGAIN || errno == EINTR))
				pr_err("failed to sync perf data, error: %m\n");
		}
	} while (1);
}

struct record_aio {
	struct record	*rec;
	void		*data;
	size_t		size;
};

static int record__aio_pushfn(struct perf_mmap *map, void *to, void *buf, size_t size)
{
	struct record_aio *aio = to;

	/*
	 * map->base data pointed to by buf is copied into a free
	 * map->aio.data[] buffer to release space in the kernel buffer as
	 * fast as possible, via perf_mmap__consume() called from
	 * perf_mmap__push().
	 *
	 * That lets the kernel proceed with storing more profiling data
	 * into the kernel buffer earlier than other per-cpu kernel buffers
	 * are handled.
	 *
	 * Copying can be done in two steps in case the chunk of profiling
	 * data crosses the upper bound of the kernel buffer. In this case
	 * we first move the part of data from map->start till the upper
	 * bound and then the remainder from the beginning of the kernel
	 * buffer till the end of the data chunk.
	 */

	if (record__comp_enabled(aio->rec)) {
		size = zstd_compress(aio->rec->session, aio->data + aio->size,
				     perf_mmap__mmap_len(map) - aio->size,
				     buf, size);
	} else {
		memcpy(aio->data + aio->size, buf, size);
	}

	if (!aio->size) {
		/*
		 * Increment map->refcount to guard the map->aio.data[] buffer
		 * from premature deallocation, because the map object can be
		 * released earlier than the aio write request started on
		 * its map->aio.data[] buffer is complete.
		 *
		 * perf_mmap__put() is done at record__aio_complete() after
		 * the started aio request completes, or at record__aio_push()
		 * if the request failed to start.
		 */
		perf_mmap__get(map);
	}

	aio->size += size;

	return size;
}

static int record__aio_push(struct record *rec, struct perf_mmap *map, off_t *off)
{
	int ret, idx;
	int trace_fd = rec->session->data->file.fd;
	struct record_aio aio = { .rec = rec, .size = 0 };

	/*
	 * Call record__aio_sync() to wait till the map->aio.data[] buffer
	 * becomes available after the previous aio write operation.
	 */

	idx = record__aio_sync(map, false);
	aio.data = map->aio.data[idx];
	ret = perf_mmap__push(map, &aio, record__aio_pushfn);
	if (ret != 0) /* ret > 0 - no data, ret < 0 - error */
		return ret;

	rec->samples++;
	ret = record__aio_write(&(map->aio.cblocks[idx]), trace_fd, aio.data, aio.size, *off);
	if (!ret) {
		*off += aio.size;
		rec->bytes_written += aio.size;
		if (switch_output_size(rec))
			trigger_hit(&switch_output_trigger);
	} else {
		/*
		 * Decrement map->refcount (incremented in record__aio_pushfn())
		 * if record__aio_write() failed to start; otherwise it is
		 * decremented in record__aio_complete() after the aio write
		 * operation finishes successfully.
		 */
		perf_mmap__put(map);
	}

	return ret;
}

static off_t record__aio_get_pos(int trace_fd)
{
	return lseek(trace_fd, 0, SEEK_CUR);
}

static void record__aio_set_pos(int trace_fd, off_t pos)
{
	lseek(trace_fd, pos, SEEK_SET);
}

static void record__aio_mmap_read_sync(struct record *rec)
{
	int i;
	struct evlist *evlist = rec->evlist;
	struct perf_mmap *maps = evlist->mmap;

	if (!record__aio_enabled(rec))
		return;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		struct perf_mmap *map = &maps[i];

		if (map->base)
			record__aio_sync(map, true);
	}
}

static int nr_cblocks_default = 1;
static int nr_cblocks_max = 4;

static int record__aio_parse(const struct option *opt,
			     const char *str,
			     int unset)
{
	struct record_opts *opts = (struct record_opts *)opt->value;

	if (unset) {
		opts->nr_cblocks = 0;
	} else {
		if (str)
			opts->nr_cblocks = strtol(str, NULL, 0);
		if (!opts->nr_cblocks)
			opts->nr_cblocks = nr_cblocks_default;
	}

	return 0;
}
#else /* HAVE_AIO_SUPPORT */
static int nr_cblocks_max = 0;

static int record__aio_push(struct record *rec __maybe_unused, struct perf_mmap *map __maybe_unused,
			    off_t *off __maybe_unused)
{
	return -1;
}

static off_t record__aio_get_pos(int trace_fd __maybe_unused)
{
	return -1;
}

static void record__aio_set_pos(int trace_fd __maybe_unused, off_t pos __maybe_unused)
{
}

static void record__aio_mmap_read_sync(struct record *rec __maybe_unused)
{
}
#endif

static int record__aio_enabled(struct record *rec)
{
	return rec->opts.nr_cblocks > 0;
}

#define MMAP_FLUSH_DEFAULT 1
static int record__mmap_flush_parse(const struct option *opt,
				    const char *str,
				    int unset)
{
	int flush_max;
	struct record_opts *opts = (struct record_opts *)opt->value;
	static struct parse_tag tags[] = {
			{ .tag  = 'B', .mult = 1       },
			{ .tag  = 'K', .mult = 1 << 10 },
			{ .tag  = 'M', .mult = 1 << 20 },
			{ .tag  = 'G', .mult = 1 << 30 },
			{ .tag  = 0 },
	};

	if (unset)
		return 0;

	if (str) {
		opts->mmap_flush = parse_tag_value(str, tags);
		if (opts->mmap_flush == (int)-1)
			opts->mmap_flush = strtol(str, NULL, 0);
	}

	if (!opts->mmap_flush)
		opts->mmap_flush = MMAP_FLUSH_DEFAULT;

	flush_max = perf_evlist__mmap_size(opts->mmap_pages);
	flush_max /= 4;
	if (opts->mmap_flush > flush_max)
		opts->mmap_flush = flush_max;

	return 0;
}
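/*
 * Usage sketch (assuming this parser is wired to the --mmap-flush
 * option): the value is either a size with a B/K/M/G suffix or a plain
 * number of bytes, and it is capped at a quarter of the mmap size,
 * e.g.:
 *
 *   perf record --mmap-flush 16M -- sleep 1
 */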

#ifdef HAVE_ZSTD_SUPPORT
static unsigned int comp_level_default = 1;

static int record__parse_comp_level(const struct option *opt, const char *str, int unset)
{
	struct record_opts *opts = opt->value;

	if (unset) {
		opts->comp_level = 0;
	} else {
		if (str)
			opts->comp_level = strtol(str, NULL, 0);
		if (!opts->comp_level)
			opts->comp_level = comp_level_default;
	}

	return 0;
}
#endif
static unsigned int comp_level_max = 22;

static int record__comp_enabled(struct record *rec)
{
	return rec->opts.comp_level > 0;
}

static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, NULL, event, event->header.size);
}

static int record__pushfn(struct perf_mmap *map, void *to, void *bf, size_t size)
{
	struct record *rec = to;

	if (record__comp_enabled(rec)) {
		size = zstd_compress(rec->session, map->data, perf_mmap__mmap_len(map), bf, size);
		bf = map->data;
	}

	rec->samples++;
	return record__write(rec, map, bf, size);
}

static volatile int done;
static volatile int signr = -1;
static volatile int child_finished;

static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
}

static void sigsegv_handler(int sig)
{
	perf_hooks__recover();
	sighandler_dump_stack(sig);
}

static void record__sig_exit(void)
{
	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	raise(signr);
}

#ifdef HAVE_AUXTRACE_SUPPORT

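/*
 * Write one chunk of AUX area trace data: the synthesized auxtrace
 * event header is followed by the (possibly wrapped, hence data1/data2)
 * buffer contents and up to 7 bytes of padding so the record stays
 * 8-byte aligned. When the output is neither a pipe nor a directory,
 * the file offset of the event is also recorded in the session's
 * auxtrace index.
 */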
static int record__process_auxtrace(struct perf_tool *tool,
				    struct perf_mmap *map,
				    union perf_event *event, void *data1,
				    size_t len1, void *data2, size_t len2)
{
	struct record *rec = container_of(tool, struct record, tool);
	struct perf_data *data = &rec->data;
	size_t padding;
	u8 pad[8] = {0};

	if (!perf_data__is_pipe(data) && !perf_data__is_dir(data)) {
		off_t file_offset;
		int fd = perf_data__fd(data);
		int err;

		file_offset = lseek(fd, 0, SEEK_CUR);
		if (file_offset == -1)
			return -1;
		err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
						     event, file_offset);
		if (err)
			return err;
	}

	/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
	padding = (len1 + len2) & 7;
	if (padding)
		padding = 8 - padding;

	record__write(rec, map, event, event->header.size);
	record__write(rec, map, data1, len1);
	if (len2)
		record__write(rec, map, data2, len2);
	record__write(rec, map, &pad, padding);

	return 0;
}

static int record__auxtrace_mmap_read(struct record *rec,
				      struct perf_mmap *map)
{
	int ret;

	ret = auxtrace_mmap__read(map, rec->itr, &rec->tool,
				  record__process_auxtrace);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_mmap_read_snapshot(struct record *rec,
					       struct perf_mmap *map)
{
	int ret;

	ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool,
					   record__process_auxtrace,
					   rec->opts.auxtrace_snapshot_size);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_read_snapshot_all(struct record *rec)
{
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		struct perf_mmap *map = &rec->evlist->mmap[i];

		if (!map->auxtrace_mmap.base)
			continue;

		if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) {
			rc = -1;
			goto out;
		}
	}
out:
	return rc;
}

static void record__read_auxtrace_snapshot(struct record *rec, bool on_exit)
{
	pr_debug("Recording AUX area tracing snapshot\n");
	if (record__auxtrace_read_snapshot_all(rec) < 0) {
		trigger_error(&auxtrace_snapshot_trigger);
	} else {
		if (auxtrace_record__snapshot_finish(rec->itr, on_exit))
			trigger_error(&auxtrace_snapshot_trigger);
		else
			trigger_ready(&auxtrace_snapshot_trigger);
	}
}

static int record__auxtrace_snapshot_exit(struct record *rec)
{
	if (trigger_is_error(&auxtrace_snapshot_trigger))
		return 0;

	if (!auxtrace_record__snapshot_started &&
	    auxtrace_record__snapshot_start(rec->itr))
		return -1;

	record__read_auxtrace_snapshot(rec, true);
	if (trigger_is_error(&auxtrace_snapshot_trigger))
		return -1;

	return 0;
}

static int record__auxtrace_init(struct record *rec)
{
	int err;

	if (!rec->itr) {
		rec->itr = auxtrace_record__init(rec->evlist, &err);
		if (err)
			return err;
	}

	err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
					      rec->opts.auxtrace_snapshot_opts);
	if (err)
		return err;

	return auxtrace_parse_filters(rec->evlist);
}

#else

static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
			       struct perf_mmap *map __maybe_unused)
{
	return 0;
}

static inline
void record__read_auxtrace_snapshot(struct record *rec __maybe_unused,
				    bool on_exit __maybe_unused)
{
}

static inline
int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
{
	return 0;
}

static inline
int record__auxtrace_snapshot_exit(struct record *rec __maybe_unused)
{
	return 0;
}

static int record__auxtrace_init(struct record *rec __maybe_unused)
{
	return 0;
}

#endif

static int record__mmap_evlist(struct record *rec,
			       struct evlist *evlist)
{
	struct record_opts *opts = &rec->opts;
	char msg[512];

	if (opts->affinity != PERF_AFFINITY_SYS)
		cpu__setup_cpunode_map();

	if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
				 opts->auxtrace_mmap_pages,
				 opts->auxtrace_snapshot_mode,
				 opts->nr_cblocks, opts->affinity,
				 opts->mmap_flush, opts->comp_level) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u,%u)\n",
			       opts->mmap_pages, opts->auxtrace_mmap_pages);
			return -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno,
				str_error_r(errno, msg, sizeof(msg)));
			if (errno)
				return -errno;
			else
				return -EINVAL;
		}
	}
	return 0;
}

static int record__mmap(struct record *rec)
{
	return record__mmap_evlist(rec, rec->evlist);
}

static int record__open(struct record *rec)
{
	char msg[BUFSIZ];
	struct evsel *pos;
	struct evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	int rc = 0;

	/*
	 * For initial_delay we need to add a dummy event so that we can track
	 * PERF_RECORD_MMAP while we wait for the initial delay to enable the
	 * real events, the ones asked by the user.
	 */
	if (opts->initial_delay) {
		if (perf_evlist__add_dummy(evlist))
			return -ENOMEM;

		pos = perf_evlist__first(evlist);
		pos->tracking = 0;
		pos = perf_evlist__last(evlist);
		pos->tracking = 1;
		pos->core.attr.enable_on_exec = 1;
	}

	perf_evlist__config(evlist, opts, &callchain_param);

	evlist__for_each_entry(evlist, pos) {
try_again:
		if (evsel__open(pos, pos->core.cpus, pos->core.threads) < 0) {
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose > 0)
					ui__warning("%s\n", msg);
				goto try_again;
			}
			if ((errno == EINVAL || errno == EBADF) &&
			    pos->leader != pos &&
			    pos->weak_group) {
				pos = perf_evlist__reset_weak_group(evlist, pos);
				goto try_again;
			}
			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}

		pos->supported = true;
	}

	if (perf_evlist__apply_filters(evlist, &pos)) {
		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
			pos->filter, perf_evsel__name(pos), errno,
			str_error_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	rc = record__mmap(rec);
	if (rc)
		goto out;

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct evsel *evsel,
				struct machine *machine)
{
	struct record *rec = container_of(tool, struct record, tool);

	if (rec->evlist->first_sample_time == 0)
		rec->evlist->first_sample_time = sample->time;

	rec->evlist->last_sample_time = sample->time;

	if (rec->buildid_all)
		return 0;

	rec->samples++;
	return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
}

static int process_buildids(struct record *rec)
{
	struct perf_session *session = rec->session;

	if (perf_data__size(&rec->data) == 0)
		return 0;

	/*
	 * During this process, it'll load the kernel map and replace the
	 * dso->long_name with a real pathname it found. In this case
	 * we prefer the vmlinux path like
	 *   /lib/modules/3.16.4/build/vmlinux
	 *
	 * rather than the build-id path (in the debug directory):
	 *   $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
	 */
	symbol_conf.ignore_vmlinux_buildid = true;

	/*
	 * If --buildid-all is given, it marks all DSOs regardless of hits,
	 * so there is no need to process samples. But if timestamp_boundary
	 * is enabled, it still needs to walk all samples to get the
	 * timestamps of the first/last samples.
	 */
	if (rec->buildid_all && !rec->timestamp_boundary)
		rec->tool.sample = NULL;

	return perf_session__process_events(session);
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * As for the guest kernel, when processing the record & report
	 * subcommands we arrange the module mmaps prior to the guest kernel
	 * mmap and trigger a preload of the dso, because default guest
	 * module symbols are loaded from guest kallsyms instead of
	 * /lib/modules/XXX/XXX. This method is used to avoid missing symbols
	 * when the first address is in a module instead of in the guest
	 * kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for the guest kernel because the guest kernel's
	 * /proc/kallsyms sometimes has no _text.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

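/*
 * Note: a FINISHED_ROUND event is emitted after each pass over the
 * mmaps (see record__mmap_read_evlist() below); on the report side it
 * bounds how far buffered events can be sorted and flushed in
 * timestamp order.
 */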
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

static void record__adjust_affinity(struct record *rec, struct perf_mmap *map)
{
	if (rec->opts.affinity != PERF_AFFINITY_SYS &&
	    !CPU_EQUAL(&rec->affinity_mask, &map->affinity_mask)) {
		CPU_ZERO(&rec->affinity_mask);
		CPU_OR(&rec->affinity_mask, &rec->affinity_mask, &map->affinity_mask);
		sched_setaffinity(0, sizeof(rec->affinity_mask), &rec->affinity_mask);
	}
}

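/*
 * process_comp_header() is the callback handed to
 * zstd_compress_stream_to_records(): called with increment == 0 it
 * initializes a PERF_RECORD_COMPRESSED header and reserves room for
 * it; called again with the size of the compressed payload it grows
 * header.size accordingly.
 */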
static size_t process_comp_header(void *record, size_t increment)
{
	struct perf_record_compressed *event = record;
	size_t size = sizeof(*event);

	if (increment) {
		event->header.size += increment;
		return increment;
	}

	event->header.type = PERF_RECORD_COMPRESSED;
	event->header.size = size;

	return size;
}

static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
			    void *src, size_t src_size)
{
	size_t compressed;
	size_t max_record_size = PERF_SAMPLE_MAX_SIZE - sizeof(struct perf_record_compressed) - 1;

	compressed = zstd_compress_stream_to_records(&session->zstd_data, dst, dst_size, src, src_size,
						     max_record_size, process_comp_header);

	session->bytes_transferred += src_size;
	session->bytes_compressed += compressed;

	return compressed;
}

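/*
 * Drain all kernel buffers of an evlist, pushing their contents to the
 * output through either the synchronous or the AIO write path. When
 * synch is set (e.g. for a final flush), each map's flush threshold is
 * temporarily lowered to 1 so that even partially filled buffers are
 * written out.
 */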
static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist,
				    bool overwrite, bool synch)
{
	u64 bytes_written = rec->bytes_written;
	int i;
	int rc = 0;
	struct perf_mmap *maps;
	int trace_fd = rec->data.file.fd;
	off_t off = 0;

	if (!evlist)
		return 0;

	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
	if (!maps)
		return 0;

	if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
		return 0;

	if (record__aio_enabled(rec))
		off = record__aio_get_pos(trace_fd);

	for (i = 0; i < evlist->nr_mmaps; i++) {
		u64 flush = 0;
		struct perf_mmap *map = &maps[i];

		if (map->base) {
			record__adjust_affinity(rec, map);
			if (synch) {
				flush = map->flush;
				map->flush = 1;
			}
			if (!record__aio_enabled(rec)) {
				if (perf_mmap__push(map, rec, record__pushfn) < 0) {
					if (synch)
						map->flush = flush;
					rc = -1;
					goto out;
				}
			} else {
				if (record__aio_push(rec, map, &off) < 0) {
					record__aio_set_pos(trace_fd, off);
					if (synch)
						map->flush = flush;
					rc = -1;
					goto out;
				}
			}
			if (synch)
				map->flush = flush;
		}

		if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
		    record__auxtrace_mmap_read(rec, map) != 0) {
			rc = -1;
			goto out;
		}
	}

	if (record__aio_enabled(rec))
		record__aio_set_pos(trace_fd, off);

	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, NULL, &finished_round_event, sizeof(finished_round_event));

	if (overwrite)
		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
out:
	return rc;
}

static int record__mmap_read_all(struct record *rec, bool synch)
{
	int err;

	err = record__mmap_read_evlist(rec, rec->evlist, false, synch);
	if (err)
		return err;

	return record__mmap_read_evlist(rec, rec->evlist, true, synch);
}

static void record__init_features(struct record *rec)
{
	struct perf_session *session = rec->session;
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&rec->evlist->core.entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);

	if (!rec->opts.full_auxtrace)
		perf_header__clear_feat(&session->header, HEADER_AUXTRACE);

	if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
		perf_header__clear_feat(&session->header, HEADER_CLOCKID);

	perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
	if (!record__comp_enabled(rec))
		perf_header__clear_feat(&session->header, HEADER_COMPRESSED);

	perf_header__clear_feat(&session->header, HEADER_STAT);
}

static void
record__finish_output(struct record *rec)
{
	struct perf_data *data = &rec->data;
	int fd = perf_data__fd(data);

	if (data->is_pipe)
		return;

	rec->session->header.data_size += rec->bytes_written;
	data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR);

	if (!rec->no_buildid) {
		process_buildids(rec);

		if (rec->buildid_all)
			dsos__hit_all(rec->session);
	}
	perf_session__write_header(rec->session, rec->evlist, fd, true);

	return;
}

static int record__synthesize_workload(struct record *rec, bool tail)
{
	int err;
	struct perf_thread_map *thread_map;

	if (rec->opts.tail_synthesize != tail)
		return 0;

	thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
	if (thread_map == NULL)
		return -1;

	err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
						 process_synthesized_event,
						 &rec->session->machines.host,
						 rec->opts.sample_address);
	perf_thread_map__put(thread_map);
	return err;
}

static int record__synthesize(struct record *rec, bool tail);

static int
record__switch_output(struct record *rec, bool at_exit)
{
	struct perf_data *data = &rec->data;
	int fd, err;
	char *new_filename;

	/* Same size: "2015122520103046" */
	char timestamp[] = "InvalidTimestamp";

	record__aio_mmap_read_sync(rec);

	record__synthesize(rec, true);
	if (target__none(&rec->opts.target))
		record__synthesize_workload(rec, true);

	rec->samples = 0;
	record__finish_output(rec);
	err = fetch_current_timestamp(timestamp, sizeof(timestamp));
	if (err) {
		pr_err("Failed to get current timestamp\n");
		return -EINVAL;
	}

	fd = perf_data__switch(data, timestamp,
			       rec->session->header.data_offset,
			       at_exit, &new_filename);
	if (fd >= 0 && !at_exit) {
		rec->bytes_written = 0;
		rec->session->header.data_size = 0;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
			data->path, timestamp);

	if (rec->switch_output.num_files) {
		int n = rec->switch_output.cur_file + 1;

		if (n >= rec->switch_output.num_files)
			n = 0;
		rec->switch_output.cur_file = n;
		if (rec->switch_output.filenames[n]) {
			remove(rec->switch_output.filenames[n]);
			zfree(&rec->switch_output.filenames[n]);
		}
		rec->switch_output.filenames[n] = new_filename;
	} else {
		free(new_filename);
	}

	/* Output tracking events */
	if (!at_exit) {
		record__synthesize(rec, false);

		/*
		 * In 'perf record --switch-output' without -a,
		 * record__synthesize() in record__switch_output() won't
		 * generate tracking events because there's no thread_map
		 * in the evlist, which causes the newly created perf.data
		 * to contain no map and comm information.
		 * Create a fake thread_map and directly call
		 * perf_event__synthesize_thread_map() for those events.
		 */
		if (target__none(&rec->opts.target))
			record__synthesize_workload(rec, false);
	}
	return fd;
}

static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked for it by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}

static void snapshot_sig_handler(int sig);
static void alarm_sig_handler(int sig);

int __weak
perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
			    struct perf_tool *tool __maybe_unused,
			    perf_event__handler_t process __maybe_unused,
			    struct machine *machine __maybe_unused)
{
	return 0;
}

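/*
 * perf_event__synth_time_conv() above is a weak no-op; an architecture
 * can override it to synthesize a time-conversion event from the clock
 * parameters in the perf_event_mmap_page picked below (e.g.,
 * presumably, TSC parameters on x86).
 */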
static const struct perf_event_mmap_page *
perf_evlist__pick_pc(struct evlist *evlist)
{
	if (evlist) {
		if (evlist->mmap && evlist->mmap[0].base)
			return evlist->mmap[0].base;
		if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].base)
			return evlist->overwrite_mmap[0].base;
	}
	return NULL;
}

static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
{
	const struct perf_event_mmap_page *pc;

	pc = perf_evlist__pick_pc(rec->evlist);
	if (pc)
		return pc;
	return NULL;
}

static int record__synthesize(struct record *rec, bool tail)
{
	struct perf_session *session = rec->session;
	struct machine *machine = &session->machines.host;
	struct perf_data *data = &rec->data;
	struct record_opts *opts = &rec->opts;
	struct perf_tool *tool = &rec->tool;
	int fd = perf_data__fd(data);
	int err = 0;

	if (rec->opts.tail_synthesize != tail)
		return 0;

	if (data->is_pipe) {
		/*
		 * We need to synthesize events first, because some
		 * features work on top of them (on the report side).
		 */
		err = perf_event__synthesize_attrs(tool, rec->evlist,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out;
		}

		err = perf_event__synthesize_features(tool, session, rec->evlist,
						      process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize features.\n");
			return err;
		}

		if (have_tracepoints(&rec->evlist->core.entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints, so it's not really
			 * an error, just that we don't need to
			 * synthesize anything. We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out;
			}
			rec->bytes_written += err;
		}
	}

	err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
					  process_synthesized_event, machine);
	if (err)
		goto out;

	if (rec->opts.full_auxtrace) {
		err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
					session, process_synthesized_event);
		if (err)
			goto out;
	}

	if (!perf_evlist__exclude_kernel(rec->evlist)) {
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine);
		WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
				   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
				   "Check /proc/kallsyms permission or run as root.\n");

		err = perf_event__synthesize_modules(tool, process_synthesized_event,
						     machine);
		WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
				   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
				   "Check /proc/modules permission or run as root.\n");
	}

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	err = perf_event__synthesize_extra_attr(&rec->tool,
						rec->evlist,
						process_synthesized_event,
						data->is_pipe);
	if (err)
		goto out;

	err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->core.threads,
						 process_synthesized_event,
						NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize thread map.\n");
		return err;
	}

	err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.cpus,
					     process_synthesized_event, NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize cpu map.\n");
		return err;
	}

	err = perf_event__synthesize_bpf_events(session, process_synthesized_event,
						machine, opts);
	if (err < 0)
		pr_warning("Couldn't synthesize bpf events.\n");

	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->core.threads,
					    process_synthesized_event, opts->sample_address,
					    1);
out:
	return err;
}

Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001329static int __cmd_record(struct record *rec, int argc, const char **argv)
Peter Zijlstra16c8a102009-05-05 17:50:27 +02001330{
David Ahern57706ab2013-11-06 11:41:34 -07001331 int err;
Namhyung Kim45604712014-05-12 09:47:24 +09001332 int status = 0;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001333 unsigned long waking = 0;
Zhang, Yanmin46be6042010-03-18 11:36:04 -03001334 const bool forks = argc > 0;
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02001335 struct perf_tool *tool = &rec->tool;
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03001336 struct record_opts *opts = &rec->opts;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001337 struct perf_data *data = &rec->data;
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001338 struct perf_session *session;
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001339 bool disabled = false, draining = false;
Jiri Olsa63503db2019-07-21 13:23:52 +02001340 struct evlist *sb_evlist = NULL;
Namhyung Kim42aa2762015-01-29 17:06:48 +09001341 int fd;
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001342 float ratio = 0;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001343
Namhyung Kim45604712014-05-12 09:47:24 +09001344 atexit(record__sig_exit);
Peter Zijlstraf5970552009-06-18 23:22:55 +02001345 signal(SIGCHLD, sig_handler);
1346 signal(SIGINT, sig_handler);
David Ahern804f7ac2013-05-06 12:24:23 -06001347 signal(SIGTERM, sig_handler);
Wang Nana0748652016-11-26 07:03:28 +00001348 signal(SIGSEGV, sigsegv_handler);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00001349
Hari Bathinif3b36142017-03-08 02:11:43 +05301350 if (rec->opts.record_namespaces)
1351 tool->namespace_events = true;
1352
Jiri Olsadc0c6122017-01-09 10:51:58 +01001353 if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001354 signal(SIGUSR2, snapshot_sig_handler);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001355 if (rec->opts.auxtrace_snapshot_mode)
1356 trigger_on(&auxtrace_snapshot_trigger);
Jiri Olsadc0c6122017-01-09 10:51:58 +01001357 if (rec->switch_output.enabled)
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001358 trigger_on(&switch_output_trigger);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00001359 } else {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001360 signal(SIGUSR2, SIG_IGN);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00001361 }
Peter Zijlstraf5970552009-06-18 23:22:55 +02001362
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001363 session = perf_session__new(data, false, tool);
Arnaldo Carvalho de Melo94c744b2009-12-11 21:24:02 -02001364 if (session == NULL) {
Adrien BAKffa91882014-04-18 11:00:43 +09001365 pr_err("Perf session creation failed.\n");
Arnaldo Carvalho de Meloa9a70bb2009-11-17 01:18:11 -02001366 return -1;
1367 }
1368
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001369 fd = perf_data__fd(data);
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001370 rec->session = session;
1371
Alexey Budankov5d7f4112019-03-18 20:43:35 +03001372 if (zstd_init(&session->zstd_data, rec->opts.comp_level) < 0) {
1373 pr_err("Compression initialization failed.\n");
1374 return -1;
1375 }
1376
1377 session->header.env.comp_type = PERF_COMP_ZSTD;
1378 session->header.env.comp_level = rec->opts.comp_level;
1379
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001380 record__init_features(rec);
Stephane Eranian330aa672012-03-08 23:47:46 +01001381
Alexey Budankovcf790512018-10-09 17:36:24 +03001382 if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
1383 session->header.env.clockid_res_ns = rec->opts.clockid_res_ns;
1384
Arnaldo Carvalho de Melod4db3f12009-12-27 21:36:57 -02001385 if (forks) {
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001386 err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001387 argv, data->is_pipe,
Arnaldo Carvalho de Melo735f7e02014-01-03 14:56:49 -03001388 workload_exec_failed_signal);
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001389 if (err < 0) {
1390 pr_err("Couldn't run the workload!\n");
Namhyung Kim45604712014-05-12 09:47:24 +09001391 status = err;
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001392 goto out_delete_session;
Jens Axboe0a5ac842009-08-12 11:18:01 +02001393 }
Peter Zijlstra856e9662009-12-16 17:55:55 +01001394 }
1395
Jiri Olsaad46e48c2018-03-02 17:13:54 +01001396 /*
1397	 * If we have just a single event and are sending data
1398	 * through a pipe, we need to force sample id allocation,
1399	 * because we synthesize the event name through the pipe
1400	 * and need the id for that.
1401 */
Jiri Olsa6484d2f2019-07-21 13:24:28 +02001402 if (data->is_pipe && rec->evlist->core.nr_entries == 1)
Jiri Olsaad46e48c2018-03-02 17:13:54 +01001403 rec->opts.sample_id = true;
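	/*
	 * For instance, a piped session such as (illustrative):
	 *
	 *   $ perf record -e cycles -o - -- true | perf report -i -
	 *
	 * takes this path: one event, data written to a pipe.
	 */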
1404
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001405 if (record__open(rec) != 0) {
David Ahern8d3eca22012-08-26 12:24:47 -06001406 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001407 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001408 }
Alexey Budankov42e1fd82019-03-18 20:41:33 +03001409 session->header.env.comp_mmap_len = session->evlist->mmap_len;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001410
Wang Nan8690a2a2016-02-22 09:10:32 +00001411 err = bpf__apply_obj_config();
1412 if (err) {
1413 char errbuf[BUFSIZ];
1414
1415 bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
1416 pr_err("ERROR: Apply config to BPF failed: %s\n",
1417 errbuf);
1418 goto out_child;
1419 }
1420
Adrian Huntercca84822015-08-19 17:29:21 +03001421 /*
1422 * Normally perf_session__new would do this, but it doesn't have the
1423 * evlist.
1424 */
1425 if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
1426 pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
1427 rec->tool.ordered_events = false;
1428 }
1429
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001430 if (!rec->evlist->nr_groups)
Namhyung Kima8bb5592013-01-22 18:09:31 +09001431 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
1432
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001433 if (data->is_pipe) {
Namhyung Kim42aa2762015-01-29 17:06:48 +09001434 err = perf_header__write_pipe(fd);
Tom Zanussi529870e2010-04-01 23:59:16 -05001435 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001436 goto out_child;
Jiri Olsa563aecb2013-06-05 13:35:06 +02001437 } else {
Namhyung Kim42aa2762015-01-29 17:06:48 +09001438 err = perf_session__write_header(session, rec->evlist, fd, false);
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02001439 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001440 goto out_child;
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02001441 }
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02001442
David Ahernd3665492012-02-06 15:27:52 -07001443 if (!rec->no_buildid
Robert Richtere20960c2011-12-07 10:02:55 +01001444 && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
David Ahernd3665492012-02-06 15:27:52 -07001445 pr_err("Couldn't generate buildids. "
Robert Richtere20960c2011-12-07 10:02:55 +01001446 "Use --no-buildid to profile anyway.\n");
David Ahern8d3eca22012-08-26 12:24:47 -06001447 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001448 goto out_child;
Robert Richtere20960c2011-12-07 10:02:55 +01001449 }
1450
Song Liud56354d2019-03-11 22:30:51 -07001451 if (!opts->no_bpf_event)
1452 bpf_event__add_sb_event(&sb_evlist, &session->header.env);
1453
Song Liu657ee552019-03-11 22:30:50 -07001454 if (perf_evlist__start_sb_thread(sb_evlist, &rec->opts.target)) {
1455 pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
1456 opts->no_bpf_event = true;
1457 }
1458
Wang Nan4ea648a2016-07-14 08:34:47 +00001459 err = record__synthesize(rec, false);
Wang Nanc45c86e2016-02-26 09:32:07 +00001460 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001461 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001462
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001463 if (rec->realtime_prio) {
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001464 struct sched_param param;
1465
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001466 param.sched_priority = rec->realtime_prio;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001467 if (sched_setscheduler(0, SCHED_FIFO, &param)) {
Arnaldo Carvalho de Melo6beba7a2009-10-21 17:34:06 -02001468 pr_err("Could not set realtime priority.\n");
David Ahern8d3eca22012-08-26 12:24:47 -06001469 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001470 goto out_child;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001471 }
1472 }
1473
Jiri Olsa774cb492012-11-12 18:34:01 +01001474 /*
1475 * When perf is starting the traced process, all the events
1476 * (apart from group members) have enable_on_exec=1 set,
1477 * so don't spoil it by prematurely enabling them.
1478 */
Andi Kleen6619a532014-01-11 13:38:27 -08001479 if (!target__none(&opts->target) && !opts->initial_delay)
Jiri Olsa1c87f162019-07-21 13:24:08 +02001480 evlist__enable(rec->evlist);
David Ahern764e16a32011-08-25 10:17:55 -06001481
Peter Zijlstra856e9662009-12-16 17:55:55 +01001482 /*
1483 * Let the child rip
1484 */
Namhyung Kime803cf92015-09-22 09:24:55 +09001485 if (forks) {
Jiri Olsa20a8a3c2018-03-07 16:50:04 +01001486 struct machine *machine = &session->machines.host;
Namhyung Kime5bed562015-09-30 10:45:24 +09001487 union perf_event *event;
Hari Bathinie907caf2017-03-08 02:11:51 +05301488 pid_t tgid;
Namhyung Kime5bed562015-09-30 10:45:24 +09001489
1490 event = malloc(sizeof(event->comm) + machine->id_hdr_size);
1491 if (event == NULL) {
1492 err = -ENOMEM;
1493 goto out_child;
1494 }
1495
Namhyung Kime803cf92015-09-22 09:24:55 +09001496 /*
1497		 * Some H/W events are generated before the COMM event,
1498		 * which is emitted during exec(), so perf script
1499		 * cannot see a correct process name for those events.
1500		 * Synthesize a COMM event to prevent that.
1501 */
Hari Bathinie907caf2017-03-08 02:11:51 +05301502 tgid = perf_event__synthesize_comm(tool, event,
1503 rec->evlist->workload.pid,
1504 process_synthesized_event,
1505 machine);
1506 free(event);
1507
1508 if (tgid == -1)
1509 goto out_child;
1510
1511 event = malloc(sizeof(event->namespaces) +
1512 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
1513 machine->id_hdr_size);
1514 if (event == NULL) {
1515 err = -ENOMEM;
1516 goto out_child;
1517 }
1518
1519 /*
1520 * Synthesize NAMESPACES event for the command specified.
1521 */
1522 perf_event__synthesize_namespaces(tool, event,
1523 rec->evlist->workload.pid,
1524 tgid, process_synthesized_event,
1525 machine);
Namhyung Kime5bed562015-09-30 10:45:24 +09001526 free(event);
Namhyung Kime803cf92015-09-22 09:24:55 +09001527
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001528 perf_evlist__start_workload(rec->evlist);
Namhyung Kime803cf92015-09-22 09:24:55 +09001529 }
Peter Zijlstra856e9662009-12-16 17:55:55 +01001530
Andi Kleen6619a532014-01-11 13:38:27 -08001531 if (opts->initial_delay) {
Arnaldo Carvalho de Melo0693e682016-08-08 15:05:46 -03001532 usleep(opts->initial_delay * USEC_PER_MSEC);
Jiri Olsa1c87f162019-07-21 13:24:08 +02001533 evlist__enable(rec->evlist);
Andi Kleen6619a532014-01-11 13:38:27 -08001534 }
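	/*
	 * E.g. "perf record -D 500 -- <cmd>" (illustrative) lands here and
	 * enables the events roughly 500ms after the workload starts.
	 */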
1535
Wang Nan5f9cf592016-04-20 18:59:49 +00001536 trigger_ready(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001537 trigger_ready(&switch_output_trigger);
Wang Nana0748652016-11-26 07:03:28 +00001538 perf_hooks__invoke_record_start();
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001539 for (;;) {
Yang Shi9f065192015-09-29 14:49:43 -07001540 unsigned long long hits = rec->samples;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001541
Wang Nan057374642016-07-14 08:34:43 +00001542 /*
1543		 * rec->evlist->bkw_mmap_state may be
1544		 * BKW_MMAP_EMPTY here: when done == true and
1545		 * hits != rec->samples in the previous round.
1546		 *
1547		 * perf_evlist__toggle_bkw_mmap() ensures we never
1548		 * convert BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
1549 */
1550 if (trigger_is_hit(&switch_output_trigger) || done || draining)
1551 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);
1552
Alexey Budankov470530b2019-03-18 20:40:26 +03001553 if (record__mmap_read_all(rec, false) < 0) {
Wang Nan5f9cf592016-04-20 18:59:49 +00001554 trigger_error(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001555 trigger_error(&switch_output_trigger);
David Ahern8d3eca22012-08-26 12:24:47 -06001556 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001557 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001558 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001559
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001560 if (auxtrace_record__snapshot_started) {
1561 auxtrace_record__snapshot_started = 0;
Wang Nan5f9cf592016-04-20 18:59:49 +00001562 if (!trigger_is_error(&auxtrace_snapshot_trigger))
Alexander Shishkince7b0e42019-08-06 17:41:01 +03001563 record__read_auxtrace_snapshot(rec, false);
Wang Nan5f9cf592016-04-20 18:59:49 +00001564 if (trigger_is_error(&auxtrace_snapshot_trigger)) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001565 pr_err("AUX area tracing snapshot failed\n");
1566 err = -1;
1567 goto out_child;
1568 }
1569 }
1570
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001571 if (trigger_is_hit(&switch_output_trigger)) {
Wang Nan057374642016-07-14 08:34:43 +00001572 /*
1573			 * If switch_output_trigger is hit, the data in the
1574			 * overwritable ring buffer should have been collected,
1575			 * so bkw_mmap_state should be set to BKW_MMAP_EMPTY.
1576			 *
1577			 * If SIGUSR2 was raised after or during record__mmap_read_all(),
1578			 * record__mmap_read_all() didn't collect data from the
1579			 * overwritable ring buffer. Read again.
1580 */
1581 if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
1582 continue;
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001583 trigger_ready(&switch_output_trigger);
1584
Wang Nan057374642016-07-14 08:34:43 +00001585 /*
1586			 * Re-enable events in the overwrite ring buffer after
1587 * record__mmap_read_all(): we should have collected
1588 * data from it.
1589 */
1590 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);
1591
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001592 if (!quiet)
1593 fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
1594 waking);
1595 waking = 0;
1596 fd = record__switch_output(rec, false);
1597 if (fd < 0) {
1598 pr_err("Failed to switch to new file\n");
1599 trigger_error(&switch_output_trigger);
1600 err = fd;
1601 goto out_child;
1602 }
Jiri Olsabfacbe32017-01-09 10:52:00 +01001603
1604 /* re-arm the alarm */
1605 if (rec->switch_output.time)
1606 alarm(rec->switch_output.time);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001607 }
1608
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001609 if (hits == rec->samples) {
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001610 if (done || draining)
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001611 break;
Arnaldo Carvalho de Melof66a8892014-08-18 17:25:59 -03001612 err = perf_evlist__poll(rec->evlist, -1);
Jiri Olsaa5151142014-06-02 13:44:23 -04001613 /*
1614			 * Propagate the error only if there is one. Ignore a positive
1615			 * number of returned events and interrupt errors (EINTR).
1616 */
1617 if (err > 0 || (err < 0 && errno == EINTR))
Namhyung Kim45604712014-05-12 09:47:24 +09001618 err = 0;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001619 waking++;
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001620
1621 if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
1622 draining = true;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001623 }
1624
Jiri Olsa774cb492012-11-12 18:34:01 +01001625 /*
1626		 * When perf is starting the traced process, the events die
1627		 * with the process at the end and we wait for that. Thus there
1628		 * is no need to disable events in this case.
1629 */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001630 if (done && !disabled && !target__none(&opts->target)) {
Wang Nan5f9cf592016-04-20 18:59:49 +00001631 trigger_off(&auxtrace_snapshot_trigger);
Jiri Olsae74676d2019-07-21 13:24:09 +02001632 evlist__disable(rec->evlist);
Jiri Olsa27119262012-11-12 18:34:02 +01001633 disabled = true;
1634 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001635 }
Alexander Shishkince7b0e42019-08-06 17:41:01 +03001636
Wang Nan5f9cf592016-04-20 18:59:49 +00001637 trigger_off(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001638 trigger_off(&switch_output_trigger);
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001639
Alexander Shishkince7b0e42019-08-06 17:41:01 +03001640 if (opts->auxtrace_snapshot_on_exit)
1641 record__auxtrace_snapshot_exit(rec);
1642
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001643 if (forks && workload_exec_errno) {
Masami Hiramatsu35550da2014-08-14 02:22:43 +00001644 char msg[STRERR_BUFSIZE];
Arnaldo Carvalho de Meloc8b5f2c2016-07-06 11:56:20 -03001645 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001646 pr_err("Workload failed: %s\n", emsg);
1647 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001648 goto out_child;
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001649 }
1650
Namhyung Kime3d59112015-01-29 17:06:44 +09001651 if (!quiet)
Namhyung Kim45604712014-05-12 09:47:24 +09001652 fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02001653
Wang Nan4ea648a2016-07-14 08:34:47 +00001654 if (target__none(&rec->opts.target))
1655 record__synthesize_workload(rec, true);
1656
Namhyung Kim45604712014-05-12 09:47:24 +09001657out_child:
Alexey Budankov470530b2019-03-18 20:40:26 +03001658 record__mmap_read_all(rec, true);
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001659 record__aio_mmap_read_sync(rec);
1660
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001661 if (rec->session->bytes_transferred && rec->session->bytes_compressed) {
1662 ratio = (float)rec->session->bytes_transferred/(float)rec->session->bytes_compressed;
1663 session->header.env.comp_ratio = ratio + 0.5;
1664 }
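	/*
	 * E.g. (illustrative) 100MB transferred that compressed down to 40MB
	 * gives a ratio of 2.5, stored as 3 after the +0.5 rounding above.
	 */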
1665
Namhyung Kim45604712014-05-12 09:47:24 +09001666 if (forks) {
1667 int exit_status;
Ingo Molnaraddc2782009-06-02 23:43:11 +02001668
Namhyung Kim45604712014-05-12 09:47:24 +09001669 if (!child_finished)
1670 kill(rec->evlist->workload.pid, SIGTERM);
1671
1672 wait(&exit_status);
1673
1674 if (err < 0)
1675 status = err;
1676 else if (WIFEXITED(exit_status))
1677 status = WEXITSTATUS(exit_status);
1678 else if (WIFSIGNALED(exit_status))
1679 signr = WTERMSIG(exit_status);
1680 } else
1681 status = err;
1682
Wang Nan4ea648a2016-07-14 08:34:47 +00001683 record__synthesize(rec, true);
Namhyung Kime3d59112015-01-29 17:06:44 +09001684 /* this will be recalculated during process_buildids() */
1685 rec->samples = 0;
1686
Wang Nanecfd7a92016-04-13 08:21:07 +00001687 if (!err) {
1688 if (!rec->timestamp_filename) {
1689 record__finish_output(rec);
1690 } else {
1691 fd = record__switch_output(rec, true);
1692 if (fd < 0) {
1693 status = fd;
1694 goto out_delete_session;
1695 }
1696 }
1697 }
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001698
Wang Nana0748652016-11-26 07:03:28 +00001699 perf_hooks__invoke_record_end();
1700
Namhyung Kime3d59112015-01-29 17:06:44 +09001701 if (!err && !quiet) {
1702 char samples[128];
Wang Nanecfd7a92016-04-13 08:21:07 +00001703 const char *postfix = rec->timestamp_filename ?
1704 ".<timestamp>" : "";
Namhyung Kime3d59112015-01-29 17:06:44 +09001705
Adrian Hunteref149c22015-04-09 18:53:45 +03001706 if (rec->samples && !rec->opts.full_auxtrace)
Namhyung Kime3d59112015-01-29 17:06:44 +09001707 scnprintf(samples, sizeof(samples),
1708 " (%" PRIu64 " samples)", rec->samples);
1709 else
1710 samples[0] = '\0';
1711
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001712 fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s",
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001713 perf_data__size(data) / 1024.0 / 1024.0,
Jiri Olsa2d4f2792019-02-21 10:41:30 +01001714 data->path, postfix, samples);
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001715 if (ratio) {
1716 fprintf(stderr, ", compressed (original %.3f MB, ratio is %.3f)",
1717 rec->session->bytes_transferred / 1024.0 / 1024.0,
1718 ratio);
1719 }
1720 fprintf(stderr, " ]\n");
Namhyung Kime3d59112015-01-29 17:06:44 +09001721 }
1722
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001723out_delete_session:
Alexey Budankov5d7f4112019-03-18 20:43:35 +03001724 zstd_fini(&session->zstd_data);
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001725 perf_session__delete(session);
Song Liu657ee552019-03-11 22:30:50 -07001726
1727 if (!opts->no_bpf_event)
1728 perf_evlist__stop_sb_thread(sb_evlist);
Namhyung Kim45604712014-05-12 09:47:24 +09001729 return status;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001730}
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001731
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001732static void callchain_debug(struct callchain_param *callchain)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001733{
Kan Liangaad2b212015-01-05 13:23:04 -05001734 static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
Jiri Olsaa601fdf2014-02-03 12:44:43 +01001735
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001736 pr_debug("callchain: type %s\n", str[callchain->record_mode]);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001737
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001738 if (callchain->record_mode == CALLCHAIN_DWARF)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001739 pr_debug("callchain: stack dump size %d\n",
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001740 callchain->dump_size);
1741}
1742
1743int record_opts__parse_callchain(struct record_opts *record,
1744 struct callchain_param *callchain,
1745 const char *arg, bool unset)
1746{
1747 int ret;
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001748 callchain->enabled = !unset;
1749
1750 /* --no-call-graph */
1751 if (unset) {
1752 callchain->record_mode = CALLCHAIN_NONE;
1753 pr_debug("callchain: disabled\n");
1754 return 0;
1755 }
1756
1757 ret = parse_callchain_record_opt(arg, callchain);
1758 if (!ret) {
1759 /* Enable data address sampling for DWARF unwind. */
1760 if (callchain->record_mode == CALLCHAIN_DWARF)
1761 record->sample_address = true;
1762 callchain_debug(callchain);
1763 }
1764
1765 return ret;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001766}
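/*
 * A usage sketch of the parsing above: "--call-graph dwarf,8192" selects
 * CALLCHAIN_DWARF with an 8kB stack dump size and, per the code above,
 * implicitly turns on sample_address, while "--no-call-graph" hits the
 * unset branch and disables callchain recording altogether.
 */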
1767
Kan Liangc421e802015-07-29 05:42:12 -04001768int record_parse_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001769 const char *arg,
1770 int unset)
1771{
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001772 return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
Jiri Olsa26d33022012-08-07 15:20:47 +02001773}
1774
Kan Liangc421e802015-07-29 05:42:12 -04001775int record_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001776 const char *arg __maybe_unused,
1777 int unset __maybe_unused)
1778{
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001779 struct callchain_param *callchain = opt->value;
Kan Liangc421e802015-07-29 05:42:12 -04001780
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001781 callchain->enabled = true;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001782
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001783 if (callchain->record_mode == CALLCHAIN_NONE)
1784 callchain->record_mode = CALLCHAIN_FP;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001785
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001786 callchain_debug(callchain);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001787 return 0;
1788}
1789
Jiri Olsaeb853e82014-02-03 12:44:42 +01001790static int perf_record_config(const char *var, const char *value, void *cb)
1791{
Namhyung Kim7a29c082015-12-15 10:49:56 +09001792 struct record *rec = cb;
1793
1794 if (!strcmp(var, "record.build-id")) {
1795 if (!strcmp(value, "cache"))
1796 rec->no_buildid_cache = false;
1797 else if (!strcmp(value, "no-cache"))
1798 rec->no_buildid_cache = true;
1799 else if (!strcmp(value, "skip"))
1800 rec->no_buildid = true;
1801 else
1802 return -1;
1803 return 0;
1804 }
Yisheng Xiecff17202018-03-12 19:25:57 +08001805 if (!strcmp(var, "record.call-graph")) {
1806 var = "call-graph.record-mode";
1807 return perf_default_config(var, value, cb);
1808 }
Alexey Budankov93f20c02018-11-06 12:07:19 +03001809#ifdef HAVE_AIO_SUPPORT
1810 if (!strcmp(var, "record.aio")) {
1811 rec->opts.nr_cblocks = strtol(value, NULL, 0);
1812 if (!rec->opts.nr_cblocks)
1813 rec->opts.nr_cblocks = nr_cblocks_default;
1814 }
1815#endif
Jiri Olsaeb853e82014-02-03 12:44:42 +01001816
Yisheng Xiecff17202018-03-12 19:25:57 +08001817 return 0;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001818}
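/*
 * The keys handled above come from perfconfig; a sketch of a matching
 * section (values are examples only, "aio" requires HAVE_AIO_SUPPORT):
 *
 *   [record]
 *       build-id = skip
 *       call-graph = dwarf
 *       aio = 4
 */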
1819
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001820struct clockid_map {
1821 const char *name;
1822 int clockid;
1823};
1824
1825#define CLOCKID_MAP(n, c) \
1826 { .name = n, .clockid = (c), }
1827
1828#define CLOCKID_END { .name = NULL, }
1829
1830
1831/*
1832 * Add the missing ones; we need to build on many distros...
1833 */
1834#ifndef CLOCK_MONOTONIC_RAW
1835#define CLOCK_MONOTONIC_RAW 4
1836#endif
1837#ifndef CLOCK_BOOTTIME
1838#define CLOCK_BOOTTIME 7
1839#endif
1840#ifndef CLOCK_TAI
1841#define CLOCK_TAI 11
1842#endif
1843
1844static const struct clockid_map clockids[] = {
1845 /* available for all events, NMI safe */
1846 CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
1847 CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),
1848
1849 /* available for some events */
1850 CLOCKID_MAP("realtime", CLOCK_REALTIME),
1851 CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
1852 CLOCKID_MAP("tai", CLOCK_TAI),
1853
1854 /* available for the lazy */
1855 CLOCKID_MAP("mono", CLOCK_MONOTONIC),
1856 CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
1857 CLOCKID_MAP("real", CLOCK_REALTIME),
1858 CLOCKID_MAP("boot", CLOCK_BOOTTIME),
1859
1860 CLOCKID_END,
1861};
1862
Alexey Budankovcf790512018-10-09 17:36:24 +03001863static int get_clockid_res(clockid_t clk_id, u64 *res_ns)
1864{
1865 struct timespec res;
1866
1867 *res_ns = 0;
1868 if (!clock_getres(clk_id, &res))
1869 *res_ns = res.tv_nsec + res.tv_sec * NSEC_PER_SEC;
1870 else
1871 pr_warning("WARNING: Failed to determine specified clock resolution.\n");
1872
1873 return 0;
1874}
1875
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001876static int parse_clockid(const struct option *opt, const char *str, int unset)
1877{
1878 struct record_opts *opts = (struct record_opts *)opt->value;
1879 const struct clockid_map *cm;
1880 const char *ostr = str;
1881
1882 if (unset) {
1883 opts->use_clockid = 0;
1884 return 0;
1885 }
1886
1887 /* no arg passed */
1888 if (!str)
1889 return 0;
1890
1891	/* don't allow setting it twice */
1892 if (opts->use_clockid)
1893 return -1;
1894
1895 opts->use_clockid = true;
1896
1897	/* if it's a number, we're done */
1898 if (sscanf(str, "%d", &opts->clockid) == 1)
Alexey Budankovcf790512018-10-09 17:36:24 +03001899 return get_clockid_res(opts->clockid, &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001900
1901 /* allow a "CLOCK_" prefix to the name */
1902 if (!strncasecmp(str, "CLOCK_", 6))
1903 str += 6;
1904
1905 for (cm = clockids; cm->name; cm++) {
1906 if (!strcasecmp(str, cm->name)) {
1907 opts->clockid = cm->clockid;
Alexey Budankovcf790512018-10-09 17:36:24 +03001908 return get_clockid_res(opts->clockid,
1909 &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001910 }
1911 }
1912
1913 opts->use_clockid = false;
1914 ui__warning("unknown clockid %s, check man page\n", ostr);
1915 return -1;
1916}
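/*
 * A usage sketch: -k/--clockid accepts a raw clockid number or one of the
 * names in clockids[], case-insensitively and with an optional "CLOCK_"
 * prefix, e.g.:
 *
 *   $ perf record -k monotonic_raw -- sleep 1
 *   $ perf record -k CLOCK_BOOTTIME -- sleep 1
 */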
1917
Alexey Budankovf4fe11b2019-01-22 20:52:03 +03001918static int record__parse_affinity(const struct option *opt, const char *str, int unset)
1919{
1920 struct record_opts *opts = (struct record_opts *)opt->value;
1921
1922 if (unset || !str)
1923 return 0;
1924
1925 if (!strcasecmp(str, "node"))
1926 opts->affinity = PERF_AFFINITY_NODE;
1927 else if (!strcasecmp(str, "cpu"))
1928 opts->affinity = PERF_AFFINITY_CPU;
1929
1930 return 0;
1931}
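/*
 * A usage sketch, mirroring the strings parsed above:
 *
 *   $ perf record --affinity=node ...   # bind reader to the buffer's NUMA node mask
 *   $ perf record --affinity=cpu ...    # bind reader to the buffer's cpu
 */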
1932
Adrian Huntere9db1312015-04-09 18:53:46 +03001933static int record__parse_mmap_pages(const struct option *opt,
1934 const char *str,
1935 int unset __maybe_unused)
1936{
1937 struct record_opts *opts = opt->value;
1938 char *s, *p;
1939 unsigned int mmap_pages;
1940 int ret;
1941
1942 if (!str)
1943 return -EINVAL;
1944
1945 s = strdup(str);
1946 if (!s)
1947 return -ENOMEM;
1948
1949 p = strchr(s, ',');
1950 if (p)
1951 *p = '\0';
1952
1953 if (*s) {
1954 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
1955 if (ret)
1956 goto out_free;
1957 opts->mmap_pages = mmap_pages;
1958 }
1959
1960 if (!p) {
1961 ret = 0;
1962 goto out_free;
1963 }
1964
1965 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
1966 if (ret)
1967 goto out_free;
1968
1969 opts->auxtrace_mmap_pages = mmap_pages;
1970
1971out_free:
1972 free(s);
1973 return ret;
1974}
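/*
 * The "pages[,pages]" argument parsed above sets the data mmap size and,
 * after the comma, the AUX area mmap size. Illustrative:
 *
 *   $ perf record -m 512,1024 ...   # 512 data pages, 1024 AUX pages
 *   $ perf record -m ,128 ...       # default data size, 128 AUX pages
 */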
1975
Jiri Olsa0c582442017-01-09 10:51:59 +01001976static void switch_output_size_warn(struct record *rec)
1977{
1978 u64 wakeup_size = perf_evlist__mmap_size(rec->opts.mmap_pages);
1979 struct switch_output *s = &rec->switch_output;
1980
1981 wakeup_size /= 2;
1982
1983 if (s->size < wakeup_size) {
1984 char buf[100];
1985
1986 unit_number__scnprintf(buf, sizeof(buf), wakeup_size);
1987 pr_warning("WARNING: switch-output data size lower than "
1988 "wakeup kernel buffer size (%s) "
1989 "expect bigger perf.data sizes\n", buf);
1990 }
1991}
1992
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001993static int switch_output_setup(struct record *rec)
1994{
1995 struct switch_output *s = &rec->switch_output;
Jiri Olsadc0c6122017-01-09 10:51:58 +01001996 static struct parse_tag tags_size[] = {
1997 { .tag = 'B', .mult = 1 },
1998 { .tag = 'K', .mult = 1 << 10 },
1999 { .tag = 'M', .mult = 1 << 20 },
2000 { .tag = 'G', .mult = 1 << 30 },
2001 { .tag = 0 },
2002 };
Jiri Olsabfacbe32017-01-09 10:52:00 +01002003 static struct parse_tag tags_time[] = {
2004 { .tag = 's', .mult = 1 },
2005 { .tag = 'm', .mult = 60 },
2006 { .tag = 'h', .mult = 60*60 },
2007 { .tag = 'd', .mult = 60*60*24 },
2008 { .tag = 0 },
2009 };
Jiri Olsadc0c6122017-01-09 10:51:58 +01002010 unsigned long val;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002011
2012 if (!s->set)
2013 return 0;
2014
2015 if (!strcmp(s->str, "signal")) {
2016 s->signal = true;
2017 pr_debug("switch-output with SIGUSR2 signal\n");
Jiri Olsadc0c6122017-01-09 10:51:58 +01002018 goto enabled;
2019 }
2020
2021 val = parse_tag_value(s->str, tags_size);
2022 if (val != (unsigned long) -1) {
2023 s->size = val;
2024 pr_debug("switch-output with %s size threshold\n", s->str);
2025 goto enabled;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002026 }
2027
Jiri Olsabfacbe32017-01-09 10:52:00 +01002028 val = parse_tag_value(s->str, tags_time);
2029 if (val != (unsigned long) -1) {
2030 s->time = val;
2031 pr_debug("switch-output with %s time threshold (%lu seconds)\n",
2032 s->str, s->time);
2033 goto enabled;
2034 }
2035
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002036 return -1;
Jiri Olsadc0c6122017-01-09 10:51:58 +01002037
2038enabled:
2039 rec->timestamp_filename = true;
2040 s->enabled = true;
Jiri Olsa0c582442017-01-09 10:51:59 +01002041
2042 if (s->size && !rec->opts.no_buffering)
2043 switch_output_size_warn(rec);
2044
Jiri Olsadc0c6122017-01-09 10:51:58 +01002045 return 0;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002046}
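/*
 * Forms accepted by the tag tables above, e.g.:
 *
 *   $ perf record --switch-output ...        # rotate output on SIGUSR2
 *   $ perf record --switch-output=100M ...   # rotate after ~100MB written
 *   $ perf record --switch-output=30s ...    # rotate every 30 seconds
 */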
2047
Namhyung Kime5b2c202014-10-23 00:15:46 +09002048static const char * const __record_usage[] = {
Mike Galbraith9e0967532009-05-28 16:25:34 +02002049 "perf record [<options>] [<command>]",
2050 "perf record [<options>] -- <command> [<options>]",
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002051 NULL
2052};
Namhyung Kime5b2c202014-10-23 00:15:46 +09002053const char * const *record_usage = __record_usage;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002054
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002055/*
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03002056 * XXX Ideally this would be local to cmd_record() and passed to a record__new
2057 * because we need to have access to it in record__exit, which is called
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002058 * after cmd_record() exits, but since record_options needs to be accessible to
2059 * builtin-script, leave it here.
2060 *
2061 * At least we don't touch it directly in all the other functions here.
2062 *
2063 * Just say no to tons of global variables, sigh.
2064 */
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03002065static struct record record = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002066 .opts = {
Andi Kleen8affc2b2014-07-31 14:45:04 +08002067 .sample_time = true,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002068 .mmap_pages = UINT_MAX,
2069 .user_freq = UINT_MAX,
2070 .user_interval = ULLONG_MAX,
Arnaldo Carvalho de Melo447a6012012-05-22 13:14:18 -03002071 .freq = 4000,
Namhyung Kimd1cb9fc2012-05-16 18:45:49 +09002072 .target = {
2073 .uses_mmap = true,
Adrian Hunter3aa59392013-11-15 15:52:29 +02002074 .default_per_cpu = true,
Namhyung Kimd1cb9fc2012-05-16 18:45:49 +09002075 },
Alexey Budankov470530b2019-03-18 20:40:26 +03002076 .mmap_flush = MMAP_FLUSH_DEFAULT,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002077 },
Namhyung Kime3d59112015-01-29 17:06:44 +09002078 .tool = {
2079 .sample = process_sample_event,
2080 .fork = perf_event__process_fork,
Adrian Huntercca84822015-08-19 17:29:21 +03002081 .exit = perf_event__process_exit,
Namhyung Kime3d59112015-01-29 17:06:44 +09002082 .comm = perf_event__process_comm,
Hari Bathinif3b36142017-03-08 02:11:43 +05302083 .namespaces = perf_event__process_namespaces,
Namhyung Kime3d59112015-01-29 17:06:44 +09002084 .mmap = perf_event__process_mmap,
2085 .mmap2 = perf_event__process_mmap2,
Adrian Huntercca84822015-08-19 17:29:21 +03002086 .ordered_events = true,
Namhyung Kime3d59112015-01-29 17:06:44 +09002087 },
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002088};
Frederic Weisbecker7865e812010-04-14 19:42:07 +02002089
Namhyung Kim76a26542015-10-22 23:28:32 +09002090const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
2091 "\n\t\t\t\tDefault: fp";
Arnaldo Carvalho de Melo61eaa3b2012-10-01 15:20:58 -03002092
Wang Nan0aab2132016-06-16 08:02:41 +00002093static bool dry_run;
2094
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002095/*
2096 * XXX Will stay a global variable until we fix builtin-script.c to stop messing
2097 * with it and switch to using the library functions in perf_evlist that came
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03002098 * from builtin-record.c, i.e. use record_opts,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002099 * perf_evlist__prepare_workload, etc. instead of fork+exec'ing 'perf record',
2100 * using pipes, etc.
2101 */
Jiri Olsaefd21302017-01-03 09:19:55 +01002102static struct option __record_options[] = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002103 OPT_CALLBACK('e', "event", &record.evlist, "event",
Thomas Gleixner86847b62009-06-06 12:24:17 +02002104 "event selector. use 'perf list' to list available events",
Jiri Olsaf120f9d2011-07-14 11:25:32 +02002105 parse_events_option),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002106 OPT_CALLBACK(0, "filter", &record.evlist, "filter",
Li Zefanc171b552009-10-15 11:22:07 +08002107 "event filter", parse_filter),
Wang Nan4ba1faa2015-07-10 07:36:10 +00002108 OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
2109 NULL, "don't record events from perf itself",
2110 exclude_perf),
Namhyung Kimbea03402012-04-26 14:15:15 +09002111 OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03002112 "record events on existing process id"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002113 OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03002114 "record events on existing thread id"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002115 OPT_INTEGER('r', "realtime", &record.realtime_prio,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002116 "collect data with this RT SCHED_FIFO priority"),
Arnaldo Carvalho de Melo509051e2014-01-14 17:52:14 -03002117 OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
Kirill Smelkovacac03f2011-01-12 17:59:36 +03002118 "collect data without buffering"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002119 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
Frederic Weisbeckerdaac07b2009-08-13 10:27:19 +02002120 "collect raw sample records from all opened counters"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002121 OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002122 "system-wide collection from all CPUs"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002123 OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
Stephane Eranianc45c6ea2010-05-28 12:00:01 +02002124 "list of cpus to monitor"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002125 OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
Jiri Olsa2d4f2792019-02-21 10:41:30 +01002126 OPT_STRING('o', "output", &record.data.path, "file",
Ingo Molnarabaff322009-06-02 22:59:57 +02002127 "output file name"),
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02002128 OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
2129 &record.opts.no_inherit_set,
2130 "child tasks do not inherit counters"),
Wang Nan4ea648a2016-07-14 08:34:47 +00002131 OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
2132 "synthesize non-sample events at the end of output"),
Wang Nan626a6b72016-07-14 08:34:45 +00002133 OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
Song Liu71184c62019-03-11 22:30:37 -07002134	OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "do not record bpf events"),
Arnaldo Carvalho de Melob09c2362018-03-01 14:52:50 -03002135 OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
2136 "Fail if the specified frequency can't be used"),
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03002137 OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
2138 "profile at this frequency",
2139 record__parse_freq),
Adrian Huntere9db1312015-04-09 18:53:46 +03002140 OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
2141 "number of mmap data pages and AUX area tracing mmap pages",
2142 record__parse_mmap_pages),
Alexey Budankov470530b2019-03-18 20:40:26 +03002143 OPT_CALLBACK(0, "mmap-flush", &record.opts, "number",
2144 "Minimal number of bytes that is extracted from mmap data pages (default: 1)",
2145 record__mmap_flush_parse),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002146 OPT_BOOLEAN(0, "group", &record.opts.group,
Lin Ming43bece72011-08-17 18:42:07 +08002147 "put the counters into a counter group"),
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03002148 OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002149 NULL, "enables call-graph recording" ,
2150 &record_callchain_opt),
2151 OPT_CALLBACK(0, "call-graph", &record.opts,
Namhyung Kim76a26542015-10-22 23:28:32 +09002152 "record_mode[,record_size]", record_callchain_help,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002153 &record_parse_callchain_opt),
Ian Munsiec0555642010-04-13 18:37:33 +10002154 OPT_INCR('v', "verbose", &verbose,
Ingo Molnar3da297a2009-06-07 17:39:02 +02002155 "be more verbose (show counter open errors, etc)"),
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02002156 OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002157 OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02002158 "per thread counts"),
Peter Zijlstra56100322015-06-10 16:48:50 +02002159 OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
Kan Liang3b0a5da2017-08-29 13:11:08 -04002160 OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
2161 "Record the sample physical addresses"),
Jiri Olsab6f35ed2016-08-01 20:02:35 +02002162 OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
Adrian Hunter3abebc52015-07-06 14:51:01 +03002163 OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
2164 &record.opts.sample_time_set,
2165 "Record the sample timestamps"),
Jiri Olsaf290aa12018-02-01 09:38:11 +01002166 OPT_BOOLEAN_SET('P', "period", &record.opts.period, &record.opts.period_set,
2167 "Record the sample period"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002168 OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02002169 "don't sample"),
Wang Nand2db9a92016-01-25 09:56:19 +00002170 OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
2171 &record.no_buildid_cache_set,
2172 "do not update the buildid cache"),
2173 OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
2174 &record.no_buildid_set,
2175 "do not collect buildids in perf.data"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002176 OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
Stephane Eranian023695d2011-02-14 11:20:01 +02002177 "monitor event in cgroup name only",
2178 parse_cgroups),
Arnaldo Carvalho de Meloa6205a32014-01-14 17:58:12 -03002179 OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
Andi Kleen6619a532014-01-11 13:38:27 -08002180 "ms to wait before starting measurement after program start"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002181 OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
2182 "user to profile"),
Stephane Eraniana5aabda2012-03-08 23:47:45 +01002183
2184 OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
2185 "branch any", "sample any taken branches",
2186 parse_branch_stack),
2187
2188 OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
2189 "branch filter mask", "branch stack filter modes",
Roberto Agostino Vitillobdfebd82012-02-09 23:21:02 +01002190 parse_branch_stack),
Andi Kleen05484292013-01-24 16:10:29 +01002191 OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
2192 "sample by weight (on special events only)"),
Andi Kleen475eeab2013-09-20 07:40:43 -07002193 OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
2194 "sample transaction flags (special events only)"),
Adrian Hunter3aa59392013-11-15 15:52:29 +02002195 OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
2196 "use per-thread mmaps"),
Stephane Eranianbcc84ec2015-08-31 18:41:12 +02002197 OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
2198 "sample selected machine registers on interrupt,"
Kan Liangaeea9062019-05-14 13:19:32 -07002199 " use '-I?' to list register names", parse_intr_regs),
Andi Kleen84c41742017-09-05 10:00:28 -07002200 OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
2201 "sample selected machine registers on interrupt,"
Kan Liangaeea9062019-05-14 13:19:32 -07002202 " use '--user-regs=?' to list register names", parse_user_regs),
Andi Kleen85c273d2015-02-24 15:13:40 -08002203 OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
2204 "Record running/enabled time of read (:S) events"),
Peter Zijlstra814c8c32015-03-31 00:19:31 +02002205 OPT_CALLBACK('k', "clockid", &record.opts,
2206 "clockid", "clockid to use for events, see clock_gettime()",
2207 parse_clockid),
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002208 OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
2209 "opts", "AUX area tracing Snapshot Mode", ""),
Mark Drayton3fcb10e2018-12-04 12:34:20 -08002210 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
Kan Liang9d9cad72015-06-17 09:51:11 -04002211 "per thread proc mmap processing timeout in ms"),
Hari Bathinif3b36142017-03-08 02:11:43 +05302212 OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
2213 "Record namespaces events"),
Adrian Hunterb757bb02015-07-21 12:44:04 +03002214 OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
2215 "Record context switch events"),
Jiri Olsa85723882016-02-15 09:34:31 +01002216 OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
2217 "Configure all used events to run in kernel space.",
2218 PARSE_OPT_EXCLUSIVE),
2219 OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
2220 "Configure all used events to run in user space.",
2221 PARSE_OPT_EXCLUSIVE),
yuzhoujian53651b22019-05-30 14:29:22 +01002222 OPT_BOOLEAN(0, "kernel-callchains", &record.opts.kernel_callchains,
2223 "collect kernel callchains"),
2224 OPT_BOOLEAN(0, "user-callchains", &record.opts.user_callchains,
2225 "collect user callchains"),
Wang Nan71dc23262015-10-14 12:41:19 +00002226 OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
2227 "clang binary to use for compiling BPF scriptlets"),
2228 OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
2229 "options passed to clang when compiling BPF scriptlets"),
He Kuang7efe0e02015-12-14 10:39:23 +00002230 OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
2231 "file", "vmlinux pathname"),
Namhyung Kim61566812016-01-11 22:37:09 +09002232 OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
2233 "Record build-id of all DSOs regardless of hits"),
Wang Nanecfd7a92016-04-13 08:21:07 +00002234 OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
2235 "append timestamp to output filename"),
Jin Yao68588ba2017-12-08 21:13:42 +08002236 OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
2237 "Record timestamp boundary (time of first/last samples)"),
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002238 OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
Andi Kleenc38dab72019-03-14 15:49:56 -07002239 &record.switch_output.set, "signal or size[BKMG] or time[smhd]",
2240 "Switch output when receiving SIGUSR2 (signal) or cross a size or time threshold",
Jiri Olsadc0c6122017-01-09 10:51:58 +01002241 "signal"),
Andi Kleen03724b22019-03-14 15:49:55 -07002242 OPT_INTEGER(0, "switch-max-files", &record.switch_output.num_files,
2243 "Limit number of switch output generated files"),
Wang Nan0aab2132016-06-16 08:02:41 +00002244 OPT_BOOLEAN(0, "dry-run", &dry_run,
2245 "Parse options then exit"),
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002246#ifdef HAVE_AIO_SUPPORT
Alexey Budankov93f20c02018-11-06 12:07:19 +03002247 OPT_CALLBACK_OPTARG(0, "aio", &record.opts,
2248 &nr_cblocks_default, "n", "Use <n> control blocks in asynchronous trace writing mode (default: 1, max: 4)",
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002249 record__aio_parse),
2250#endif
Alexey Budankovf4fe11b2019-01-22 20:52:03 +03002251 OPT_CALLBACK(0, "affinity", &record.opts, "node|cpu",
2252 "Set affinity mask of trace reading thread to NUMA node cpu mask or cpu of processed mmap buffer",
2253 record__parse_affinity),
Alexey Budankov504c1ad2019-03-18 20:44:42 +03002254#ifdef HAVE_ZSTD_SUPPORT
2255 OPT_CALLBACK_OPTARG('z', "compression-level", &record.opts, &comp_level_default,
2256 "n", "Compressed records using specified level (default: 1 - fastest compression, 22 - greatest compression)",
2257 record__parse_comp_level),
2258#endif
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002259 OPT_END()
2260};
2261
Namhyung Kime5b2c202014-10-23 00:15:46 +09002262struct option *record_options = __record_options;
2263
Arnaldo Carvalho de Melob0ad8ea2017-03-27 11:47:20 -03002264int cmd_record(int argc, const char **argv)
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002265{
Adrian Hunteref149c22015-04-09 18:53:45 +03002266 int err;
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03002267 struct record *rec = &record;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002268 char errbuf[BUFSIZ];
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002269
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03002270 setlocale(LC_ALL, "");
2271
Wang Nan48e1cab2015-12-14 10:39:22 +00002272#ifndef HAVE_LIBBPF_SUPPORT
2273# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
2274 set_nobuild('\0', "clang-path", true);
2275 set_nobuild('\0', "clang-opt", true);
2276# undef set_nobuild
2277#endif
2278
He Kuang7efe0e02015-12-14 10:39:23 +00002279#ifndef HAVE_BPF_PROLOGUE
2280# if !defined (HAVE_DWARF_SUPPORT)
2281# define REASON "NO_DWARF=1"
2282# elif !defined (HAVE_LIBBPF_SUPPORT)
2283# define REASON "NO_LIBBPF=1"
2284# else
2285# define REASON "this architecture doesn't support BPF prologue"
2286# endif
2287# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
2288 set_nobuild('\0', "vmlinux", true);
2289# undef set_nobuild
2290# undef REASON
2291#endif
2292
Alexey Budankov9d2ed642019-01-22 20:47:43 +03002293 CPU_ZERO(&rec->affinity_mask);
2294 rec->opts.affinity = PERF_AFFINITY_SYS;
2295
Jiri Olsa0f98b112019-07-21 13:23:55 +02002296 rec->evlist = evlist__new();
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03002297 if (rec->evlist == NULL)
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -02002298 return -ENOMEM;
2299
Arnaldo Carvalho de Meloecc4c562017-01-24 13:44:10 -03002300 err = perf_config(perf_record_config, rec);
2301 if (err)
2302 return err;
Jiri Olsaeb853e82014-02-03 12:44:42 +01002303
Tom Zanussibca647a2010-11-10 08:11:30 -06002304 argc = parse_options(argc, argv, record_options, record_usage,
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02002305 PARSE_OPT_STOP_AT_NON_OPTION);
Namhyung Kim68ba3232017-02-17 17:17:42 +09002306 if (quiet)
2307 perf_quiet_option();
Jiri Olsa483635a2017-02-17 18:00:18 +01002308
2309 /* Make system wide (-a) the default target. */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002310 if (!argc && target__none(&rec->opts.target))
Jiri Olsa483635a2017-02-17 18:00:18 +01002311 rec->opts.target.system_wide = true;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002312
Namhyung Kimbea03402012-04-26 14:15:15 +09002313 if (nr_cgroups && !rec->opts.target.system_wide) {
Namhyung Kimc7118362015-10-25 00:49:27 +09002314 usage_with_options_msg(record_usage, record_options,
2315 "cgroup monitoring only available in system-wide mode");
2316
Stephane Eranian023695d2011-02-14 11:20:01 +02002317 }
Alexey Budankov504c1ad2019-03-18 20:44:42 +03002318
2319 if (rec->opts.comp_level != 0) {
2320 pr_debug("Compression enabled, disabling build id collection at the end of the session.\n");
2321 rec->no_buildid = true;
2322 }
2323
Adrian Hunterb757bb02015-07-21 12:44:04 +03002324 if (rec->opts.record_switch_events &&
2325 !perf_can_record_switch_events()) {
Namhyung Kimc7118362015-10-25 00:49:27 +09002326 ui__error("kernel does not support recording context switch events\n");
2327 parse_options_usage(record_usage, record_options, "switch-events", 0);
2328 return -EINVAL;
Adrian Hunterb757bb02015-07-21 12:44:04 +03002329 }
Stephane Eranian023695d2011-02-14 11:20:01 +02002330
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002331 if (switch_output_setup(rec)) {
2332 parse_options_usage(record_usage, record_options, "switch-output", 0);
2333 return -EINVAL;
2334 }
2335
Jiri Olsabfacbe32017-01-09 10:52:00 +01002336 if (rec->switch_output.time) {
2337 signal(SIGALRM, alarm_sig_handler);
2338 alarm(rec->switch_output.time);
2339 }
2340
Andi Kleen03724b22019-03-14 15:49:55 -07002341 if (rec->switch_output.num_files) {
2342 rec->switch_output.filenames = calloc(sizeof(char *),
2343 rec->switch_output.num_files);
2344 if (!rec->switch_output.filenames)
2345 return -EINVAL;
2346 }
2347
Adrian Hunter1b36c032016-09-23 17:38:39 +03002348 /*
2349 * Allow aliases to facilitate the lookup of symbols for address
2350 * filters. Refer to auxtrace_parse_filters().
2351 */
2352 symbol_conf.allow_aliases = true;
2353
2354 symbol__init(NULL);
2355
Adrian Hunter4b5ea3b2018-03-06 11:13:12 +02002356 err = record__auxtrace_init(rec);
Adrian Hunter1b36c032016-09-23 17:38:39 +03002357 if (err)
2358 goto out;
2359
Wang Nan0aab2132016-06-16 08:02:41 +00002360 if (dry_run)
Adrian Hunter5c01ad602016-09-23 17:38:37 +03002361 goto out;
Wang Nan0aab2132016-06-16 08:02:41 +00002362
Wang Nand7888572016-04-08 15:07:24 +00002363 err = bpf__setup_stdout(rec->evlist);
2364 if (err) {
2365 bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
2366 pr_err("ERROR: Setup BPF stdout failed: %s\n",
2367 errbuf);
Adrian Hunter5c01ad602016-09-23 17:38:37 +03002368 goto out;
Wang Nand7888572016-04-08 15:07:24 +00002369 }
2370
Adrian Hunteref149c22015-04-09 18:53:45 +03002371 err = -ENOMEM;
2372
Arnaldo Carvalho de Melo6c443952017-11-14 11:03:19 -03002373 if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(rec->evlist))
Arnaldo Carvalho de Melo646aaea2011-05-27 11:00:41 -03002374 pr_warning(
2375"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
Igor Lubashevd06e5fa2019-08-26 21:39:16 -04002376"check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
Arnaldo Carvalho de Melo646aaea2011-05-27 11:00:41 -03002377"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
2378"file is not found in the buildid cache or in the vmlinux path.\n\n"
2379"Samples in kernel modules won't be resolved at all.\n\n"
2380"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
2381"even with a suitable vmlinux or kallsyms file.\n\n");
Arnaldo Carvalho de Meloec80fde2011-05-26 09:53:51 -03002382
Wang Nan0c1d46a2016-04-20 18:59:52 +00002383 if (rec->no_buildid_cache || rec->no_buildid) {
Stephane Eraniana1ac1d32010-06-17 11:39:01 +02002384 disable_buildid_cache();
Jiri Olsadc0c6122017-01-09 10:51:58 +01002385 } else if (rec->switch_output.enabled) {
Wang Nan0c1d46a2016-04-20 18:59:52 +00002386 /*
2387 * In 'perf record --switch-output', disable buildid
2388 * generation by default to reduce data file switching
2389		 * overhead. Still generate buildids if they are explicitly
2390		 * required, using
2391 *
Jiri Olsa60437ac2017-01-03 09:19:56 +01002392 * perf record --switch-output --no-no-buildid \
Wang Nan0c1d46a2016-04-20 18:59:52 +00002393 * --no-no-buildid-cache
2394 *
2395 * Following code equals to:
2396 *
2397 * if ((rec->no_buildid || !rec->no_buildid_set) &&
2398 * (rec->no_buildid_cache || !rec->no_buildid_cache_set))
2399 * disable_buildid_cache();
2400 */
2401 bool disable = true;
2402
2403 if (rec->no_buildid_set && !rec->no_buildid)
2404 disable = false;
2405 if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
2406 disable = false;
2407 if (disable) {
2408 rec->no_buildid = true;
2409 rec->no_buildid_cache = true;
2410 disable_buildid_cache();
2411 }
2412 }
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02002413
Wang Nan4ea648a2016-07-14 08:34:47 +00002414 if (record.opts.overwrite)
2415 record.opts.tail_synthesize = true;
2416
Jiri Olsa6484d2f2019-07-21 13:24:28 +02002417 if (rec->evlist->core.nr_entries == 0 &&
Arnaldo Carvalho de Melo4b4cd502017-07-03 13:26:32 -03002418 __perf_evlist__add_default(rec->evlist, !record.opts.no_samples) < 0) {
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02002419 pr_err("Not enough memory for event selector list\n");
Adrian Hunter394c01e2016-09-23 17:38:36 +03002420 goto out;
Peter Zijlstrabbd36e52009-06-11 23:11:50 +02002421 }
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002422
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02002423 if (rec->opts.target.tid && !rec->opts.no_inherit_set)
2424 rec->opts.no_inherit = true;
2425
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002426 err = target__validate(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002427 if (err) {
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002428 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Jiri Olsac3dec272018-02-06 19:17:58 +01002429 ui__warning("%s\n", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002430 }
Namhyung Kim4bd0f2d2012-04-26 14:15:18 +09002431
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002432 err = target__parse_uid(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002433 if (err) {
2434 int saved_errno = errno;
2435
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002436 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Namhyung Kim3780f482012-05-29 13:22:57 +09002437 ui__error("%s", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002438
2439 err = -saved_errno;
Adrian Hunter394c01e2016-09-23 17:38:36 +03002440 goto out;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002441 }
Arnaldo Carvalho de Melo0d37aa32012-01-19 14:08:15 -02002442
	/* Enable ignoring missing threads when the -u or -p option is given. */
	rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;

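	/* Allocate the thread and cpu maps describing the validated target. */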
	err = -ENOMEM;
	if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

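	/* Let the AUX area tracing backend (e.g. Intel PT) validate its options. */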
	err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
	if (err)
		goto out;

	/*
	 * We take all buildids when the file contains AUX area
	 * tracing data, because decoding the trace to find out
	 * which buildids are actually needed would take too long.
	 */
	if (rec->opts.full_auxtrace)
		rec->buildid_all = true;

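	/* Apply defaults (e.g. sampling frequency) and consistency checks to the remaining options. */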
	if (record_opts__config(&rec->opts)) {
		err = -EINVAL;
		goto out;
	}

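	/* Clamp the number of asynchronous (--aio) control blocks to the supported maximum. */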
	if (rec->opts.nr_cblocks > nr_cblocks_max)
		rec->opts.nr_cblocks = nr_cblocks_max;
	pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks);

	pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
	pr_debug("mmap flush: %d\n", rec->opts.mmap_flush);

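	/* Clamp the trace compression level (-z) to the maximum the library supports. */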
	if (rec->opts.comp_level > comp_level_max)
		rec->opts.comp_level = comp_level_max;
	pr_debug("comp level: %d\n", rec->opts.comp_level);

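	/* Everything is set up: run the actual record session. */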
	err = __cmd_record(&record, argc, argv);
out:
	evlist__delete(rec->evlist);
	symbol__exit();
	auxtrace_record__free(rec->itr);
	return err;
}

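/*
 * SIGUSR2 handler: depending on which mode is active, trigger an AUX area
 * snapshot and/or a switch of the perf.data output file.
 */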
static void snapshot_sig_handler(int sig __maybe_unused)
{
	struct record *rec = &record;

	if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
		trigger_hit(&auxtrace_snapshot_trigger);
		auxtrace_record__snapshot_started = 1;
		if (auxtrace_record__snapshot_start(record.itr))
			trigger_error(&auxtrace_snapshot_trigger);
	}

	if (switch_output_signal(rec))
		trigger_hit(&switch_output_trigger);
}

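/* SIGALRM handler: implements the timed variant of --switch-output. */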
static void alarm_sig_handler(int sig __maybe_unused)
{
	struct record *rec = &record;

	if (switch_output_time(rec))
		trigger_hit(&switch_output_trigger);
}