// SPDX-License-Identifier: GPL-2.0
/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "util/build-id.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/config.h"

#include "util/callchain.h"
#include "util/cgroup.h"
#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/target.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/record.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"
#include "util/perf_regs.h"
#include "util/auxtrace.h"
#include "util/tsc.h"
#include "util/parse-branch-options.h"
#include "util/parse-regs-options.h"
#include "util/llvm-utils.h"
#include "util/bpf-loader.h"
#include "util/trigger.h"
#include "util/perf-hooks.h"
#include "util/cpu-set-sched.h"
#include "util/synthetic-events.h"
#include "util/time-utils.h"
#include "util/units.h"
#include "util/bpf-event.h"
#include "asm/bug.h"
#include "perf.h"

#include <errno.h>
#include <inttypes.h>
#include <locale.h>
#include <poll.h>
#include <unistd.h>
#include <sched.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>

struct switch_output {
	bool		 enabled;
	bool		 signal;
	unsigned long	 size;
	unsigned long	 time;
	const char	*str;
	bool		 set;
	char		 **filenames;
	int		 num_files;
	int		 cur_file;
};

struct record {
	struct perf_tool	tool;
	struct record_opts	opts;
	u64			bytes_written;
	struct perf_data	data;
	struct auxtrace_record	*itr;
	struct evlist		*evlist;
	struct perf_session	*session;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_set;
	bool			no_buildid_cache;
	bool			no_buildid_cache_set;
	bool			buildid_all;
	bool			timestamp_filename;
	bool			timestamp_boundary;
	struct switch_output	switch_output;
	unsigned long long	samples;
	cpu_set_t		affinity_mask;
};

static volatile int auxtrace_record__snapshot_started;
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
static DEFINE_TRIGGER(switch_output_trigger);

static const char *affinity_tags[PERF_AFFINITY_MAX] = {
	"SYS", "NODE", "CPU"
};

static bool switch_output_signal(struct record *rec)
{
	return rec->switch_output.signal &&
	       trigger_is_ready(&switch_output_trigger);
}

static bool switch_output_size(struct record *rec)
{
	return rec->switch_output.size &&
	       trigger_is_ready(&switch_output_trigger) &&
	       (rec->bytes_written >= rec->switch_output.size);
}

static bool switch_output_time(struct record *rec)
{
	return rec->switch_output.time &&
	       trigger_is_ready(&switch_output_trigger);
}
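/*
 * A worked example of the predicates above (illustrative, assuming the
 * --switch-output option string is parsed elsewhere into the .signal,
 * .size and .time fields): "--switch-output=1G" would set
 * switch_output.size to 1 << 30, so switch_output_size() starts
 * returning true once rec->bytes_written reaches 1GiB while the trigger
 * is ready, prompting rotation of the output file.
 */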

static int record__write(struct record *rec, struct perf_mmap *map __maybe_unused,
			 void *bf, size_t size)
{
	struct perf_data_file *file = &rec->session->data->file;

	if (perf_data_file__write(file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	rec->bytes_written += size;

	if (switch_output_size(rec))
		trigger_hit(&switch_output_trigger);

	return 0;
}

static int record__aio_enabled(struct record *rec);
static int record__comp_enabled(struct record *rec);
static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
			    void *src, size_t src_size);

#ifdef HAVE_AIO_SUPPORT
static int record__aio_write(struct aiocb *cblock, int trace_fd,
			     void *buf, size_t size, off_t off)
{
	int rc;

	cblock->aio_fildes = trace_fd;
	cblock->aio_buf = buf;
	cblock->aio_nbytes = size;
	cblock->aio_offset = off;
	cblock->aio_sigevent.sigev_notify = SIGEV_NONE;

	do {
		rc = aio_write(cblock);
		if (rc == 0) {
			break;
		} else if (errno != EAGAIN) {
			cblock->aio_fildes = -1;
			pr_err("failed to queue perf data, error: %m\n");
			break;
		}
	} while (1);

	return rc;
}

static int record__aio_complete(struct perf_mmap *md, struct aiocb *cblock)
{
	void *rem_buf;
	off_t rem_off;
	size_t rem_size;
	int rc, aio_errno;
	ssize_t aio_ret, written;

	aio_errno = aio_error(cblock);
	if (aio_errno == EINPROGRESS)
		return 0;

	written = aio_ret = aio_return(cblock);
	if (aio_ret < 0) {
		if (aio_errno != EINTR)
			pr_err("failed to write perf data, error: %m\n");
		written = 0;
	}

	rem_size = cblock->aio_nbytes - written;

	if (rem_size == 0) {
		cblock->aio_fildes = -1;
		/*
		 * md->refcount is incremented in record__aio_pushfn() for
		 * every aio write request started in record__aio_push(), so
		 * decrement it because the request is now complete.
		 */
		perf_mmap__put(md);
		rc = 1;
	} else {
		/*
		 * The aio write request may require a restart with the
		 * remainder if the kernel didn't write the whole
		 * chunk at once.
		 */
		rem_off = cblock->aio_offset + written;
		rem_buf = (void *)(cblock->aio_buf + written);
		record__aio_write(cblock, cblock->aio_fildes,
				  rem_buf, rem_size, rem_off);
		rc = 0;
	}

	return rc;
}

static int record__aio_sync(struct perf_mmap *md, bool sync_all)
{
	struct aiocb **aiocb = md->aio.aiocb;
	struct aiocb *cblocks = md->aio.cblocks;
	struct timespec timeout = { 0, 1000 * 1000 * 1 }; /* 1ms */
	int i, do_suspend;

	do {
		do_suspend = 0;
		for (i = 0; i < md->aio.nr_cblocks; ++i) {
			if (cblocks[i].aio_fildes == -1 || record__aio_complete(md, &cblocks[i])) {
				if (sync_all)
					aiocb[i] = NULL;
				else
					return i;
			} else {
				/*
				 * A started aio write is not complete yet,
				 * so it has to be waited on before the
				 * next allocation.
				 */
				aiocb[i] = &cblocks[i];
				do_suspend = 1;
			}
		}
		if (!do_suspend)
			return -1;

		while (aio_suspend((const struct aiocb **)aiocb, md->aio.nr_cblocks, &timeout)) {
			if (!(errno == EAGAIN || errno == EINTR))
				pr_err("failed to sync perf data, error: %m\n");
		}
	} while (1);
}

struct record_aio {
	struct record	*rec;
	void		*data;
	size_t		size;
};

static int record__aio_pushfn(struct perf_mmap *map, void *to, void *buf, size_t size)
{
	struct record_aio *aio = to;

	/*
	 * map->base data pointed to by buf is copied into a free
	 * map->aio.data[] buffer to release space in the kernel buffer as
	 * fast as possible, calling perf_mmap__consume() from the
	 * perf_mmap__push() function.
	 *
	 * That lets the kernel proceed with storing more profiling data into
	 * the kernel buffer earlier than other per-cpu kernel buffers are handled.
	 *
	 * Copying can be done in two steps in case the chunk of profiling data
	 * crosses the upper bound of the kernel buffer. In this case we first move
	 * the part of the data from map->start till the upper bound and then the
	 * remainder from the beginning of the kernel buffer till the end of
	 * the data chunk.
	 */

	if (record__comp_enabled(aio->rec)) {
		size = zstd_compress(aio->rec->session, aio->data + aio->size,
				     perf_mmap__mmap_len(map) - aio->size,
				     buf, size);
	} else {
		memcpy(aio->data + aio->size, buf, size);
	}

	if (!aio->size) {
		/*
		 * Increment map->refcount to guard the map->aio.data[] buffer
		 * from premature deallocation, because the map object can be
		 * released earlier than the aio write request started on the
		 * map->aio.data[] buffer is complete.
		 *
		 * perf_mmap__put() is done at record__aio_complete()
		 * after started aio request completion, or at record__aio_push()
		 * if the request failed to start.
		 */
		perf_mmap__get(map);
	}

	aio->size += size;

	return size;
}
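/*
 * A worked example of the two-step copy described above (illustrative
 * numbers): with a 64KiB kernel buffer and a 16KiB chunk starting 4KiB
 * before the upper bound, perf_mmap__push() invokes this callback twice -
 * first with the 4KiB tail of the ring, then with the remaining 12KiB
 * from the ring's beginning. Each call appends at aio->data + aio->size,
 * so the chunk lands contiguously in the aio buffer, and map->refcount
 * is taken exactly once, on the first call, where aio->size == 0.
 */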

static int record__aio_push(struct record *rec, struct perf_mmap *map, off_t *off)
{
	int ret, idx;
	int trace_fd = rec->session->data->file.fd;
	struct record_aio aio = { .rec = rec, .size = 0 };

	/*
	 * Call record__aio_sync() to wait till map->aio.data[] buffer
	 * becomes available after previous aio write operation.
	 */

	idx = record__aio_sync(map, false);
	aio.data = map->aio.data[idx];
	ret = perf_mmap__push(map, &aio, record__aio_pushfn);
	if (ret != 0) /* ret > 0 - no data, ret < 0 - error */
		return ret;

	rec->samples++;
	ret = record__aio_write(&(map->aio.cblocks[idx]), trace_fd, aio.data, aio.size, *off);
	if (!ret) {
		*off += aio.size;
		rec->bytes_written += aio.size;
		if (switch_output_size(rec))
			trigger_hit(&switch_output_trigger);
	} else {
		/*
		 * Decrement map->refcount incremented in record__aio_pushfn()
		 * back if record__aio_write() operation failed to start, otherwise
		 * map->refcount is decremented in record__aio_complete() after
		 * aio write operation finishes successfully.
		 */
		perf_mmap__put(map);
	}

	return ret;
}

static off_t record__aio_get_pos(int trace_fd)
{
	return lseek(trace_fd, 0, SEEK_CUR);
}

static void record__aio_set_pos(int trace_fd, off_t pos)
{
	lseek(trace_fd, pos, SEEK_SET);
}

static void record__aio_mmap_read_sync(struct record *rec)
{
	int i;
	struct evlist *evlist = rec->evlist;
	struct perf_mmap *maps = evlist->mmap;

	if (!record__aio_enabled(rec))
		return;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		struct perf_mmap *map = &maps[i];

		if (map->base)
			record__aio_sync(map, true);
	}
}

static int nr_cblocks_default = 1;
static int nr_cblocks_max = 4;

static int record__aio_parse(const struct option *opt,
			     const char *str,
			     int unset)
{
	struct record_opts *opts = (struct record_opts *)opt->value;

	if (unset) {
		opts->nr_cblocks = 0;
	} else {
		if (str)
			opts->nr_cblocks = strtol(str, NULL, 0);
		if (!opts->nr_cblocks)
			opts->nr_cblocks = nr_cblocks_default;
	}

	return 0;
}
#else /* HAVE_AIO_SUPPORT */
static int nr_cblocks_max = 0;

static int record__aio_push(struct record *rec __maybe_unused, struct perf_mmap *map __maybe_unused,
			    off_t *off __maybe_unused)
{
	return -1;
}

static off_t record__aio_get_pos(int trace_fd __maybe_unused)
{
	return -1;
}

static void record__aio_set_pos(int trace_fd __maybe_unused, off_t pos __maybe_unused)
{
}

static void record__aio_mmap_read_sync(struct record *rec __maybe_unused)
{
}
#endif

static int record__aio_enabled(struct record *rec)
{
	return rec->opts.nr_cblocks > 0;
}

#define MMAP_FLUSH_DEFAULT 1
static int record__mmap_flush_parse(const struct option *opt,
				    const char *str,
				    int unset)
{
	int flush_max;
	struct record_opts *opts = (struct record_opts *)opt->value;
	static struct parse_tag tags[] = {
			{ .tag = 'B', .mult = 1       },
			{ .tag = 'K', .mult = 1 << 10 },
			{ .tag = 'M', .mult = 1 << 20 },
			{ .tag = 'G', .mult = 1 << 30 },
			{ .tag = 0 },
	};

	if (unset)
		return 0;

	if (str) {
		opts->mmap_flush = parse_tag_value(str, tags);
		if (opts->mmap_flush == (int)-1)
			opts->mmap_flush = strtol(str, NULL, 0);
	}

	if (!opts->mmap_flush)
		opts->mmap_flush = MMAP_FLUSH_DEFAULT;

	flush_max = perf_evlist__mmap_size(opts->mmap_pages);
	flush_max /= 4;
	if (opts->mmap_flush > flush_max)
		opts->mmap_flush = flush_max;

	return 0;
}
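/*
 * A usage sketch for the parser above, assuming it is wired to the
 * --mmap-flush option of 'perf record': "--mmap-flush 1M" yields
 * opts->mmap_flush == 1 << 20, while a bare "4096" is taken as bytes.
 * The value is then capped at a quarter of the mmap buffer size, so a
 * flush can always happen well before the ring buffer fills up.
 */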

#ifdef HAVE_ZSTD_SUPPORT
static unsigned int comp_level_default = 1;

static int record__parse_comp_level(const struct option *opt, const char *str, int unset)
{
	struct record_opts *opts = opt->value;

	if (unset) {
		opts->comp_level = 0;
	} else {
		if (str)
			opts->comp_level = strtol(str, NULL, 0);
		if (!opts->comp_level)
			opts->comp_level = comp_level_default;
	}

	return 0;
}
#endif
static unsigned int comp_level_max = 22;

static int record__comp_enabled(struct record *rec)
{
	return rec->opts.comp_level > 0;
}
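/*
 * A usage note (illustrative): comp_level follows zstd semantics - level 1
 * (comp_level_default) favors speed, and comp_level_max = 22 matches
 * zstd's maximum. Any comp_level > 0 makes record__pushfn() and
 * record__aio_pushfn() route trace data through zstd_compress() below
 * instead of copying it verbatim.
 */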

static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, NULL, event, event->header.size);
}

static int record__pushfn(struct perf_mmap *map, void *to, void *bf, size_t size)
{
	struct record *rec = to;

	if (record__comp_enabled(rec)) {
		size = zstd_compress(rec->session, map->data, perf_mmap__mmap_len(map), bf, size);
		bf = map->data;
	}

	rec->samples++;
	return record__write(rec, map, bf, size);
}

static volatile int done;
static volatile int signr = -1;
static volatile int child_finished;

static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
}

static void sigsegv_handler(int sig)
{
	perf_hooks__recover();
	sighandler_dump_stack(sig);
}

static void record__sig_exit(void)
{
	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	raise(signr);
}

#ifdef HAVE_AUXTRACE_SUPPORT

static int record__process_auxtrace(struct perf_tool *tool,
				    struct perf_mmap *map,
				    union perf_event *event, void *data1,
				    size_t len1, void *data2, size_t len2)
{
	struct record *rec = container_of(tool, struct record, tool);
	struct perf_data *data = &rec->data;
	size_t padding;
	u8 pad[8] = {0};

	if (!perf_data__is_pipe(data) && !perf_data__is_dir(data)) {
		off_t file_offset;
		int fd = perf_data__fd(data);
		int err;

		file_offset = lseek(fd, 0, SEEK_CUR);
		if (file_offset == -1)
			return -1;
		err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
						     event, file_offset);
		if (err)
			return err;
	}

	/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
	padding = (len1 + len2) & 7;
	if (padding)
		padding = 8 - padding;

	record__write(rec, map, event, event->header.size);
	record__write(rec, map, data1, len1);
	if (len2)
		record__write(rec, map, data2, len2);
	record__write(rec, map, &pad, padding);

	return 0;
}
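/*
 * A worked example of the padding computation above: for len1 + len2 == 13,
 * (13 & 7) == 5, so padding == 3 and the payload is rounded up to 16 bytes
 * on disk; when len1 + len2 is already a multiple of 8, the mask yields 0
 * and no pad bytes are written.
 */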

static int record__auxtrace_mmap_read(struct record *rec,
				      struct perf_mmap *map)
{
	int ret;

	ret = auxtrace_mmap__read(map, rec->itr, &rec->tool,
				  record__process_auxtrace);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_mmap_read_snapshot(struct record *rec,
					       struct perf_mmap *map)
{
	int ret;

	ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool,
					   record__process_auxtrace,
					   rec->opts.auxtrace_snapshot_size);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_read_snapshot_all(struct record *rec)
{
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		struct perf_mmap *map = &rec->evlist->mmap[i];

		if (!map->auxtrace_mmap.base)
			continue;

		if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) {
			rc = -1;
			goto out;
		}
	}
out:
	return rc;
}

static void record__read_auxtrace_snapshot(struct record *rec, bool on_exit)
{
	pr_debug("Recording AUX area tracing snapshot\n");
	if (record__auxtrace_read_snapshot_all(rec) < 0) {
		trigger_error(&auxtrace_snapshot_trigger);
	} else {
		if (auxtrace_record__snapshot_finish(rec->itr, on_exit))
			trigger_error(&auxtrace_snapshot_trigger);
		else
			trigger_ready(&auxtrace_snapshot_trigger);
	}
}

static int record__auxtrace_snapshot_exit(struct record *rec)
{
	if (trigger_is_error(&auxtrace_snapshot_trigger))
		return 0;

	if (!auxtrace_record__snapshot_started &&
	    auxtrace_record__snapshot_start(rec->itr))
		return -1;

	record__read_auxtrace_snapshot(rec, true);
	if (trigger_is_error(&auxtrace_snapshot_trigger))
		return -1;

	return 0;
}

static int record__auxtrace_init(struct record *rec)
{
	int err;

	if (!rec->itr) {
		rec->itr = auxtrace_record__init(rec->evlist, &err);
		if (err)
			return err;
	}

	err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
					      rec->opts.auxtrace_snapshot_opts);
	if (err)
		return err;

	return auxtrace_parse_filters(rec->evlist);
}

#else

static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
			       struct perf_mmap *map __maybe_unused)
{
	return 0;
}

static inline
void record__read_auxtrace_snapshot(struct record *rec __maybe_unused,
				    bool on_exit __maybe_unused)
{
}

static inline
int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
{
	return 0;
}

static inline
int record__auxtrace_snapshot_exit(struct record *rec __maybe_unused)
{
	return 0;
}

static int record__auxtrace_init(struct record *rec __maybe_unused)
{
	return 0;
}

#endif

static int record__mmap_evlist(struct record *rec,
			       struct evlist *evlist)
{
	struct record_opts *opts = &rec->opts;
	char msg[512];

	if (opts->affinity != PERF_AFFINITY_SYS)
		cpu__setup_cpunode_map();

	if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
				 opts->auxtrace_mmap_pages,
				 opts->auxtrace_snapshot_mode,
				 opts->nr_cblocks, opts->affinity,
				 opts->mmap_flush, opts->comp_level) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u,%u)\n",
			       opts->mmap_pages, opts->auxtrace_mmap_pages);
			return -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno,
				str_error_r(errno, msg, sizeof(msg)));
			if (errno)
				return -errno;
			else
				return -EINVAL;
		}
	}
	return 0;
}

static int record__mmap(struct record *rec)
{
	return record__mmap_evlist(rec, rec->evlist);
}

static int record__open(struct record *rec)
{
	char msg[BUFSIZ];
	struct evsel *pos;
	struct evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	int rc = 0;

	/*
	 * For initial_delay we need to add a dummy event so that we can track
	 * PERF_RECORD_MMAP while we wait for the initial delay to enable the
	 * real events, the ones asked for by the user.
	 */
	if (opts->initial_delay) {
		if (perf_evlist__add_dummy(evlist))
			return -ENOMEM;

		pos = perf_evlist__first(evlist);
		pos->tracking = 0;
		pos = perf_evlist__last(evlist);
		pos->tracking = 1;
		pos->core.attr.enable_on_exec = 1;
	}

	perf_evlist__config(evlist, opts, &callchain_param);

	evlist__for_each_entry(evlist, pos) {
try_again:
		if (evsel__open(pos, pos->core.cpus, pos->core.threads) < 0) {
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose > 0)
					ui__warning("%s\n", msg);
				goto try_again;
			}
			if ((errno == EINVAL || errno == EBADF) &&
			    pos->leader != pos &&
			    pos->weak_group) {
				pos = perf_evlist__reset_weak_group(evlist, pos);
				goto try_again;
			}
			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}

		pos->supported = true;
	}

	if (perf_evlist__apply_filters(evlist, &pos)) {
		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
			pos->filter, perf_evsel__name(pos), errno,
			str_error_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	rc = record__mmap(rec);
	if (rc)
		goto out;

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct evsel *evsel,
				struct machine *machine)
{
	struct record *rec = container_of(tool, struct record, tool);

	if (rec->evlist->first_sample_time == 0)
		rec->evlist->first_sample_time = sample->time;

	rec->evlist->last_sample_time = sample->time;

	if (rec->buildid_all)
		return 0;

	rec->samples++;
	return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
}

static int process_buildids(struct record *rec)
{
	struct perf_session *session = rec->session;

	if (perf_data__size(&rec->data) == 0)
		return 0;

	/*
	 * During this process, it'll load the kernel map and replace the
	 * dso->long_name with a real pathname it found. In this case
	 * we prefer the vmlinux path like
	 * /lib/modules/3.16.4/build/vmlinux
	 *
	 * rather than the build-id path (in the debug directory).
	 * $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
	 */
	symbol_conf.ignore_vmlinux_buildid = true;

	/*
	 * If --buildid-all is given, it marks all DSOs regardless of hits,
	 * so there is no need to process samples. But if timestamp_boundary
	 * is enabled, it still needs to walk all samples to get the
	 * timestamps of the first/last samples.
	 */
	if (rec->buildid_all && !rec->timestamp_boundary)
		rec->tool.sample = NULL;

	return perf_session__process_events(session);
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * As for the guest kernel when processing the record & report
	 * subcommands, we arrange module mmaps prior to the guest kernel
	 * mmap and trigger a preload dso, because default guest module
	 * symbols are loaded from guest kallsyms instead of
	 * /lib/modules/XXX/XXX. This method is used to avoid missing
	 * symbols when the first address is in a module instead of in
	 * the guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for the guest kernel because the guest kernel's
	 * /proc/kallsyms sometimes has no _text.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};
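/*
 * A note on rounds (descriptive, not from the original source):
 * PERF_RECORD_FINISHED_ROUND is a synthetic marker emitted after each
 * pass over all mmap buffers (see record__mmap_read_evlist() below). On
 * the report side it bounds how far events can arrive out of order, so
 * the session layer can sort and flush everything buffered before the
 * marker instead of keeping the whole file in memory.
 */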

static void record__adjust_affinity(struct record *rec, struct perf_mmap *map)
{
	if (rec->opts.affinity != PERF_AFFINITY_SYS &&
	    !CPU_EQUAL(&rec->affinity_mask, &map->affinity_mask)) {
		CPU_ZERO(&rec->affinity_mask);
		CPU_OR(&rec->affinity_mask, &rec->affinity_mask, &map->affinity_mask);
		sched_setaffinity(0, sizeof(rec->affinity_mask), &rec->affinity_mask);
	}
}
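/*
 * A usage note (illustrative): in the NODE and CPU affinity modes (see
 * affinity_tags[] above), the recording thread migrates itself with
 * sched_setaffinity() to match the mask of the mmap buffer it is about
 * to drain, keeping reads NUMA-local. In the default SYS mode the
 * condition above is false and this function is a no-op.
 */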

static size_t process_comp_header(void *record, size_t increment)
{
	struct perf_record_compressed *event = record;
	size_t size = sizeof(*event);

	if (increment) {
		event->header.size += increment;
		return increment;
	}

	event->header.type = PERF_RECORD_COMPRESSED;
	event->header.size = size;

	return size;
}

static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
			    void *src, size_t src_size)
{
	size_t compressed;
	size_t max_record_size = PERF_SAMPLE_MAX_SIZE - sizeof(struct perf_record_compressed) - 1;

	compressed = zstd_compress_stream_to_records(&session->zstd_data, dst, dst_size, src, src_size,
						     max_record_size, process_comp_header);

	session->bytes_transferred += src_size;
	session->bytes_compressed += compressed;

	return compressed;
}
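/*
 * A worked example of the framing above:
 * zstd_compress_stream_to_records() first calls process_comp_header()
 * with increment == 0, which stamps a PERF_RECORD_COMPRESSED header whose
 * size covers only the header itself; as compressed bytes are appended it
 * calls back with increment == n and the header size grows by n each
 * time. A 4KiB chunk that compresses to 1KiB thus becomes one record of
 * sizeof(struct perf_record_compressed) + 1KiB on disk, and the
 * bytes_transferred/bytes_compressed counters feed the compression-ratio
 * summary printed when recording ends.
 */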

static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist,
				    bool overwrite, bool synch)
{
	u64 bytes_written = rec->bytes_written;
	int i;
	int rc = 0;
	struct perf_mmap *maps;
	int trace_fd = rec->data.file.fd;
	off_t off = 0;

	if (!evlist)
		return 0;

	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
	if (!maps)
		return 0;

	if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
		return 0;

	if (record__aio_enabled(rec))
		off = record__aio_get_pos(trace_fd);

	for (i = 0; i < evlist->nr_mmaps; i++) {
		u64 flush = 0;
		struct perf_mmap *map = &maps[i];

		if (map->base) {
			record__adjust_affinity(rec, map);
			if (synch) {
				flush = map->flush;
				map->flush = 1;
			}
			if (!record__aio_enabled(rec)) {
				if (perf_mmap__push(map, rec, record__pushfn) < 0) {
					if (synch)
						map->flush = flush;
					rc = -1;
					goto out;
				}
			} else {
				if (record__aio_push(rec, map, &off) < 0) {
					record__aio_set_pos(trace_fd, off);
					if (synch)
						map->flush = flush;
					rc = -1;
					goto out;
				}
			}
			if (synch)
				map->flush = flush;
		}

		if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
		    record__auxtrace_mmap_read(rec, map) != 0) {
			rc = -1;
			goto out;
		}
	}

	if (record__aio_enabled(rec))
		record__aio_set_pos(trace_fd, off);

	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, NULL, &finished_round_event, sizeof(finished_round_event));

	if (overwrite)
		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
out:
	return rc;
}

static int record__mmap_read_all(struct record *rec, bool synch)
{
	int err;

	err = record__mmap_read_evlist(rec, rec->evlist, false, synch);
	if (err)
		return err;

	return record__mmap_read_evlist(rec, rec->evlist, true, synch);
}

static void record__init_features(struct record *rec)
{
	struct perf_session *session = rec->session;
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&rec->evlist->core.entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);

	if (!rec->opts.full_auxtrace)
		perf_header__clear_feat(&session->header, HEADER_AUXTRACE);

	if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
		perf_header__clear_feat(&session->header, HEADER_CLOCKID);

	perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
	if (!record__comp_enabled(rec))
		perf_header__clear_feat(&session->header, HEADER_COMPRESSED);

	perf_header__clear_feat(&session->header, HEADER_STAT);
}

static void
record__finish_output(struct record *rec)
{
	struct perf_data *data = &rec->data;
	int fd = perf_data__fd(data);

	if (data->is_pipe)
		return;

	rec->session->header.data_size += rec->bytes_written;
	data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR);

	if (!rec->no_buildid) {
		process_buildids(rec);

		if (rec->buildid_all)
			dsos__hit_all(rec->session);
	}
	perf_session__write_header(rec->session, rec->evlist, fd, true);

	return;
}

static int record__synthesize_workload(struct record *rec, bool tail)
{
	int err;
	struct perf_thread_map *thread_map;

	if (rec->opts.tail_synthesize != tail)
		return 0;

	thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
	if (thread_map == NULL)
		return -1;

	err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
						 process_synthesized_event,
						 &rec->session->machines.host,
						 rec->opts.sample_address);
	perf_thread_map__put(thread_map);
	return err;
}

static int record__synthesize(struct record *rec, bool tail);

static int
record__switch_output(struct record *rec, bool at_exit)
{
	struct perf_data *data = &rec->data;
	int fd, err;
	char *new_filename;

	/* Same Size: "2015122520103046"*/
	char timestamp[] = "InvalidTimestamp";

	record__aio_mmap_read_sync(rec);

	record__synthesize(rec, true);
	if (target__none(&rec->opts.target))
		record__synthesize_workload(rec, true);

	rec->samples = 0;
	record__finish_output(rec);
	err = fetch_current_timestamp(timestamp, sizeof(timestamp));
	if (err) {
		pr_err("Failed to get current timestamp\n");
		return -EINVAL;
	}

	fd = perf_data__switch(data, timestamp,
			       rec->session->header.data_offset,
			       at_exit, &new_filename);
	if (fd >= 0 && !at_exit) {
		rec->bytes_written = 0;
		rec->session->header.data_size = 0;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
			data->path, timestamp);

	if (rec->switch_output.num_files) {
		int n = rec->switch_output.cur_file + 1;

		if (n >= rec->switch_output.num_files)
			n = 0;
		rec->switch_output.cur_file = n;
		if (rec->switch_output.filenames[n]) {
			remove(rec->switch_output.filenames[n]);
			zfree(&rec->switch_output.filenames[n]);
		}
		rec->switch_output.filenames[n] = new_filename;
	} else {
		free(new_filename);
	}

	/* Output tracking events */
	if (!at_exit) {
		record__synthesize(rec, false);

		/*
		 * In 'perf record --switch-output' without -a,
		 * record__synthesize() in record__switch_output() won't
		 * generate tracking events because there's no thread_map
		 * in the evlist, which causes the newly created perf.data
		 * to contain no map and comm information.
		 * Create a fake thread_map and directly call
		 * perf_event__synthesize_thread_map() for those events.
		 */
		if (target__none(&rec->opts.target))
			record__synthesize_workload(rec, false);
	}
	return fd;
}
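/*
 * A worked example of the filename ring above, assuming num_files comes
 * from a --switch-max-files style option: with num_files == 3, successive
 * rotations fill filenames[1], filenames[2] and filenames[0]; the fourth
 * rotation wraps back to slot 1, remove()s the file recorded there and
 * reuses the slot, so at most three timestamped perf.data files exist at
 * any time.
 */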

static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked for that by setting its
 * want_signal parameter to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}

static void snapshot_sig_handler(int sig);
static void alarm_sig_handler(int sig);

int __weak
perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
			    struct perf_tool *tool __maybe_unused,
			    perf_event__handler_t process __maybe_unused,
			    struct machine *machine __maybe_unused)
{
	return 0;
}

static const struct perf_event_mmap_page *
perf_evlist__pick_pc(struct evlist *evlist)
{
	if (evlist) {
		if (evlist->mmap && evlist->mmap[0].base)
			return evlist->mmap[0].base;
		if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].base)
			return evlist->overwrite_mmap[0].base;
	}
	return NULL;
}

static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
{
	const struct perf_event_mmap_page *pc;

	pc = perf_evlist__pick_pc(rec->evlist);
	if (pc)
		return pc;
	return NULL;
}

static int record__synthesize(struct record *rec, bool tail)
{
	struct perf_session *session = rec->session;
	struct machine *machine = &session->machines.host;
	struct perf_data *data = &rec->data;
	struct record_opts *opts = &rec->opts;
	struct perf_tool *tool = &rec->tool;
	int fd = perf_data__fd(data);
	int err = 0;

	if (rec->opts.tail_synthesize != tail)
		return 0;

	if (data->is_pipe) {
		/*
		 * We need to synthesize events first, because some
		 * features work on top of them (on the report side).
		 */
		err = perf_event__synthesize_attrs(tool, rec->evlist,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out;
		}

		err = perf_event__synthesize_features(tool, session, rec->evlist,
						      process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize features.\n");
			return err;
		}

		if (have_tracepoints(&rec->evlist->core.entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints, so it's not really
			 * an error, just that we don't need to
			 * synthesize anything. We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out;
			}
			rec->bytes_written += err;
		}
	}

	err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
					  process_synthesized_event, machine);
	if (err)
		goto out;

	if (rec->opts.full_auxtrace) {
		err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
					session, process_synthesized_event);
		if (err)
			goto out;
	}

	if (!perf_evlist__exclude_kernel(rec->evlist)) {
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine);
		WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
				   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
				   "Check /proc/kallsyms permission or run as root.\n");

		err = perf_event__synthesize_modules(tool, process_synthesized_event,
						     machine);
		WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
				   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
				   "Check /proc/modules permission or run as root.\n");
	}

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	err = perf_event__synthesize_extra_attr(&rec->tool,
						rec->evlist,
						process_synthesized_event,
						data->is_pipe);
	if (err)
		goto out;

	err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->core.threads,
						 process_synthesized_event,
						 NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize thread map.\n");
		return err;
	}

	err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.cpus,
					     process_synthesized_event, NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize cpu map.\n");
		return err;
	}

	err = perf_event__synthesize_bpf_events(session, process_synthesized_event,
						machine, opts);
	if (err < 0)
		pr_warning("Couldn't synthesize bpf events.\n");

	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->core.threads,
					    process_synthesized_event, opts->sample_address,
					    1);
out:
	return err;
}

static int __cmd_record(struct record *rec, int argc, const char **argv)
{
	int err;
	int status = 0;
	unsigned long waking = 0;
	const bool forks = argc > 0;
	struct perf_tool *tool = &rec->tool;
	struct record_opts *opts = &rec->opts;
	struct perf_data *data = &rec->data;
	struct perf_session *session;
	bool disabled = false, draining = false;
	struct evlist *sb_evlist = NULL;
	int fd;
	float ratio = 0;

	atexit(record__sig_exit);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGTERM, sig_handler);
	signal(SIGSEGV, sigsegv_handler);

	if (rec->opts.record_namespaces)
		tool->namespace_events = true;

	if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
		signal(SIGUSR2, snapshot_sig_handler);
		if (rec->opts.auxtrace_snapshot_mode)
			trigger_on(&auxtrace_snapshot_trigger);
		if (rec->switch_output.enabled)
			trigger_on(&switch_output_trigger);
	} else {
		signal(SIGUSR2, SIG_IGN);
	}

	session = perf_session__new(data, false, tool);
	if (session == NULL) {
		pr_err("Perf session creation failed.\n");
		return -1;
	}

	fd = perf_data__fd(data);
	rec->session = session;
1373
Alexey Budankov5d7f4112019-03-18 20:43:35 +03001374 if (zstd_init(&session->zstd_data, rec->opts.comp_level) < 0) {
1375 pr_err("Compression initialization failed.\n");
1376 return -1;
1377 }
1378
1379 session->header.env.comp_type = PERF_COMP_ZSTD;
1380 session->header.env.comp_level = rec->opts.comp_level;
1381
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001382 record__init_features(rec);
Stephane Eranian330aa672012-03-08 23:47:46 +01001383
Alexey Budankovcf790512018-10-09 17:36:24 +03001384 if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
1385 session->header.env.clockid_res_ns = rec->opts.clockid_res_ns;
1386
Arnaldo Carvalho de Melod4db3f12009-12-27 21:36:57 -02001387 if (forks) {
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001388 err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001389 argv, data->is_pipe,
Arnaldo Carvalho de Melo735f7e02014-01-03 14:56:49 -03001390 workload_exec_failed_signal);
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001391 if (err < 0) {
1392 pr_err("Couldn't run the workload!\n");
Namhyung Kim45604712014-05-12 09:47:24 +09001393 status = err;
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001394 goto out_delete_session;
Jens Axboe0a5ac842009-08-12 11:18:01 +02001395 }
Peter Zijlstra856e9662009-12-16 17:55:55 +01001396 }
1397
Jiri Olsaad46e48c2018-03-02 17:13:54 +01001398 /*
1399	 * If we have just a single event and are sending data
1400	 * through a pipe, we need to force the id allocation,
1401	 * because we synthesize the event name through the pipe
1402	 * and need the id for that.
1403 */
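	/*
	 * For example (illustrative):
	 * 'perf record -e cycles -o - true | perf report -i -' streams
	 * the data through a pipe, so the lone cycles event still needs
	 * an id for the synthesized event name.
	 */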
Jiri Olsa6484d2f2019-07-21 13:24:28 +02001404 if (data->is_pipe && rec->evlist->core.nr_entries == 1)
Jiri Olsaad46e48c2018-03-02 17:13:54 +01001405 rec->opts.sample_id = true;
1406
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001407 if (record__open(rec) != 0) {
David Ahern8d3eca22012-08-26 12:24:47 -06001408 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001409 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001410 }
Alexey Budankov42e1fd82019-03-18 20:41:33 +03001411 session->header.env.comp_mmap_len = session->evlist->mmap_len;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001412
Wang Nan8690a2a2016-02-22 09:10:32 +00001413 err = bpf__apply_obj_config();
1414 if (err) {
1415 char errbuf[BUFSIZ];
1416
1417 bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
1418 pr_err("ERROR: Apply config to BPF failed: %s\n",
1419 errbuf);
1420 goto out_child;
1421 }
1422
Adrian Huntercca84822015-08-19 17:29:21 +03001423 /*
1424 * Normally perf_session__new would do this, but it doesn't have the
1425 * evlist.
1426 */
1427 if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
1428 pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
1429 rec->tool.ordered_events = false;
1430 }
1431
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001432 if (!rec->evlist->nr_groups)
Namhyung Kima8bb5592013-01-22 18:09:31 +09001433 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
1434
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001435 if (data->is_pipe) {
Namhyung Kim42aa2762015-01-29 17:06:48 +09001436 err = perf_header__write_pipe(fd);
Tom Zanussi529870e2010-04-01 23:59:16 -05001437 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001438 goto out_child;
Jiri Olsa563aecb2013-06-05 13:35:06 +02001439 } else {
Namhyung Kim42aa2762015-01-29 17:06:48 +09001440 err = perf_session__write_header(session, rec->evlist, fd, false);
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02001441 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001442 goto out_child;
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02001443 }
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02001444
David Ahernd3665492012-02-06 15:27:52 -07001445 if (!rec->no_buildid
Robert Richtere20960c2011-12-07 10:02:55 +01001446 && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
David Ahernd3665492012-02-06 15:27:52 -07001447 pr_err("Couldn't generate buildids. "
Robert Richtere20960c2011-12-07 10:02:55 +01001448 "Use --no-buildid to profile anyway.\n");
David Ahern8d3eca22012-08-26 12:24:47 -06001449 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001450 goto out_child;
Robert Richtere20960c2011-12-07 10:02:55 +01001451 }
1452
Song Liud56354d2019-03-11 22:30:51 -07001453 if (!opts->no_bpf_event)
1454 bpf_event__add_sb_event(&sb_evlist, &session->header.env);
1455
Song Liu657ee552019-03-11 22:30:50 -07001456 if (perf_evlist__start_sb_thread(sb_evlist, &rec->opts.target)) {
1457 pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
1458 opts->no_bpf_event = true;
1459 }
1460
Wang Nan4ea648a2016-07-14 08:34:47 +00001461 err = record__synthesize(rec, false);
Wang Nanc45c86e2016-02-26 09:32:07 +00001462 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001463 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001464
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001465 if (rec->realtime_prio) {
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001466 struct sched_param param;
1467
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001468 param.sched_priority = rec->realtime_prio;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001469 if (sched_setscheduler(0, SCHED_FIFO, &param)) {
Arnaldo Carvalho de Melo6beba7a2009-10-21 17:34:06 -02001470 pr_err("Could not set realtime priority.\n");
David Ahern8d3eca22012-08-26 12:24:47 -06001471 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001472 goto out_child;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001473 }
1474 }
1475
Jiri Olsa774cb492012-11-12 18:34:01 +01001476 /*
1477 * When perf is starting the traced process, all the events
1478 * (apart from group members) have enable_on_exec=1 set,
1479 * so don't spoil it by prematurely enabling them.
1480 */
Andi Kleen6619a532014-01-11 13:38:27 -08001481 if (!target__none(&opts->target) && !opts->initial_delay)
Jiri Olsa1c87f162019-07-21 13:24:08 +02001482 evlist__enable(rec->evlist);
David Ahern764e16a32011-08-25 10:17:55 -06001483
Peter Zijlstra856e9662009-12-16 17:55:55 +01001484 /*
1485 * Let the child rip
1486 */
Namhyung Kime803cf92015-09-22 09:24:55 +09001487 if (forks) {
Jiri Olsa20a8a3c2018-03-07 16:50:04 +01001488 struct machine *machine = &session->machines.host;
Namhyung Kime5bed562015-09-30 10:45:24 +09001489 union perf_event *event;
Hari Bathinie907caf2017-03-08 02:11:51 +05301490 pid_t tgid;
Namhyung Kime5bed562015-09-30 10:45:24 +09001491
1492 event = malloc(sizeof(event->comm) + machine->id_hdr_size);
1493 if (event == NULL) {
1494 err = -ENOMEM;
1495 goto out_child;
1496 }
1497
Namhyung Kime803cf92015-09-22 09:24:55 +09001498 /*
1499	 * Some H/W events are generated before the COMM event,
1500	 * which is emitted during exec(), so perf script
1501	 * cannot see the correct process name for those events.
1502	 * Synthesize a COMM event to prevent that.
1503 */
Hari Bathinie907caf2017-03-08 02:11:51 +05301504 tgid = perf_event__synthesize_comm(tool, event,
1505 rec->evlist->workload.pid,
1506 process_synthesized_event,
1507 machine);
1508 free(event);
1509
1510 if (tgid == -1)
1511 goto out_child;
1512
1513 event = malloc(sizeof(event->namespaces) +
1514 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
1515 machine->id_hdr_size);
1516 if (event == NULL) {
1517 err = -ENOMEM;
1518 goto out_child;
1519 }
1520
1521 /*
1522		 * Synthesize a NAMESPACES event for the specified command.
1523 */
1524 perf_event__synthesize_namespaces(tool, event,
1525 rec->evlist->workload.pid,
1526 tgid, process_synthesized_event,
1527 machine);
Namhyung Kime5bed562015-09-30 10:45:24 +09001528 free(event);
Namhyung Kime803cf92015-09-22 09:24:55 +09001529
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001530 perf_evlist__start_workload(rec->evlist);
Namhyung Kime803cf92015-09-22 09:24:55 +09001531 }
Peter Zijlstra856e9662009-12-16 17:55:55 +01001532
Andi Kleen6619a532014-01-11 13:38:27 -08001533 if (opts->initial_delay) {
Arnaldo Carvalho de Melo0693e682016-08-08 15:05:46 -03001534 usleep(opts->initial_delay * USEC_PER_MSEC);
Jiri Olsa1c87f162019-07-21 13:24:08 +02001535 evlist__enable(rec->evlist);
Andi Kleen6619a532014-01-11 13:38:27 -08001536 }
1537
Wang Nan5f9cf592016-04-20 18:59:49 +00001538 trigger_ready(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001539 trigger_ready(&switch_output_trigger);
Wang Nana0748652016-11-26 07:03:28 +00001540 perf_hooks__invoke_record_start();
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001541 for (;;) {
Yang Shi9f065192015-09-29 14:49:43 -07001542 unsigned long long hits = rec->samples;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001543
Wang Nan057374642016-07-14 08:34:43 +00001544 /*
1545		 * rec->evlist->bkw_mmap_state may be
1546		 * BKW_MMAP_EMPTY here: when done == true and
1547		 * hits != rec->samples in the previous round.
1548		 *
1549		 * perf_evlist__toggle_bkw_mmap ensures we never
1550 * convert BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
1551 */
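		/*
		 * Illustrative state flow, assuming the states behave
		 * as named: BKW_MMAP_RUNNING -> BKW_MMAP_DATA_PENDING
		 * (paused, awaiting collection) -> BKW_MMAP_EMPTY
		 * (collected) -> BKW_MMAP_RUNNING (re-enabled below).
		 */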
1552 if (trigger_is_hit(&switch_output_trigger) || done || draining)
1553 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);
1554
Alexey Budankov470530b2019-03-18 20:40:26 +03001555 if (record__mmap_read_all(rec, false) < 0) {
Wang Nan5f9cf592016-04-20 18:59:49 +00001556 trigger_error(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001557 trigger_error(&switch_output_trigger);
David Ahern8d3eca22012-08-26 12:24:47 -06001558 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001559 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001560 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001561
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001562 if (auxtrace_record__snapshot_started) {
1563 auxtrace_record__snapshot_started = 0;
Wang Nan5f9cf592016-04-20 18:59:49 +00001564 if (!trigger_is_error(&auxtrace_snapshot_trigger))
Alexander Shishkince7b0e42019-08-06 17:41:01 +03001565 record__read_auxtrace_snapshot(rec, false);
Wang Nan5f9cf592016-04-20 18:59:49 +00001566 if (trigger_is_error(&auxtrace_snapshot_trigger)) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001567 pr_err("AUX area tracing snapshot failed\n");
1568 err = -1;
1569 goto out_child;
1570 }
1571 }
1572
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001573 if (trigger_is_hit(&switch_output_trigger)) {
Wang Nan057374642016-07-14 08:34:43 +00001574 /*
1575			 * If switch_output_trigger is hit, the data in the
1576			 * overwritable ring buffer should have been collected,
1577			 * so bkw_mmap_state should be set to BKW_MMAP_EMPTY.
1578			 *
1579			 * If SIGUSR2 is raised after or during record__mmap_read_all(),
1580			 * it didn't collect data from the overwritable ring
1581			 * buffer. Read again.
1582 */
1583 if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
1584 continue;
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001585 trigger_ready(&switch_output_trigger);
1586
Wang Nan057374642016-07-14 08:34:43 +00001587 /*
1588			 * Re-enable events in the overwrite ring buffer after
1589 * record__mmap_read_all(): we should have collected
1590 * data from it.
1591 */
1592 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);
1593
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001594 if (!quiet)
1595 fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
1596 waking);
1597 waking = 0;
1598 fd = record__switch_output(rec, false);
1599 if (fd < 0) {
1600 pr_err("Failed to switch to new file\n");
1601 trigger_error(&switch_output_trigger);
1602 err = fd;
1603 goto out_child;
1604 }
Jiri Olsabfacbe32017-01-09 10:52:00 +01001605
1606 /* re-arm the alarm */
1607 if (rec->switch_output.time)
1608 alarm(rec->switch_output.time);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001609 }
1610
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001611 if (hits == rec->samples) {
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001612 if (done || draining)
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001613 break;
Arnaldo Carvalho de Melof66a8892014-08-18 17:25:59 -03001614 err = perf_evlist__poll(rec->evlist, -1);
Jiri Olsaa5151142014-06-02 13:44:23 -04001615 /*
1616			 * Propagate the error only if there is one. Ignore a positive
1617			 * number of returned events and interrupt errors.
1618 */
1619 if (err > 0 || (err < 0 && errno == EINTR))
Namhyung Kim45604712014-05-12 09:47:24 +09001620 err = 0;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001621 waking++;
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001622
1623 if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
1624 draining = true;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001625 }
1626
Jiri Olsa774cb492012-11-12 18:34:01 +01001627 /*
1628		 * When perf is starting the traced process, the events die
1629		 * with the process at the end and we wait for that. Thus there
1630		 * is no need to disable events in this case.
1631 */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001632 if (done && !disabled && !target__none(&opts->target)) {
Wang Nan5f9cf592016-04-20 18:59:49 +00001633 trigger_off(&auxtrace_snapshot_trigger);
Jiri Olsae74676d2019-07-21 13:24:09 +02001634 evlist__disable(rec->evlist);
Jiri Olsa27119262012-11-12 18:34:02 +01001635 disabled = true;
1636 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001637 }
Alexander Shishkince7b0e42019-08-06 17:41:01 +03001638
Wang Nan5f9cf592016-04-20 18:59:49 +00001639 trigger_off(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001640 trigger_off(&switch_output_trigger);
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001641
Alexander Shishkince7b0e42019-08-06 17:41:01 +03001642 if (opts->auxtrace_snapshot_on_exit)
1643 record__auxtrace_snapshot_exit(rec);
1644
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001645 if (forks && workload_exec_errno) {
Masami Hiramatsu35550da2014-08-14 02:22:43 +00001646 char msg[STRERR_BUFSIZE];
Arnaldo Carvalho de Meloc8b5f2c2016-07-06 11:56:20 -03001647 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001648 pr_err("Workload failed: %s\n", emsg);
1649 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001650 goto out_child;
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001651 }
1652
Namhyung Kime3d59112015-01-29 17:06:44 +09001653 if (!quiet)
Namhyung Kim45604712014-05-12 09:47:24 +09001654 fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02001655
Wang Nan4ea648a2016-07-14 08:34:47 +00001656 if (target__none(&rec->opts.target))
1657 record__synthesize_workload(rec, true);
1658
Namhyung Kim45604712014-05-12 09:47:24 +09001659out_child:
Alexey Budankov470530b2019-03-18 20:40:26 +03001660 record__mmap_read_all(rec, true);
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001661 record__aio_mmap_read_sync(rec);
1662
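	/* The + 0.5 below rounds the stored compression ratio to the nearest integer. */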
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001663 if (rec->session->bytes_transferred && rec->session->bytes_compressed) {
1664 ratio = (float)rec->session->bytes_transferred/(float)rec->session->bytes_compressed;
1665 session->header.env.comp_ratio = ratio + 0.5;
1666 }
1667
Namhyung Kim45604712014-05-12 09:47:24 +09001668 if (forks) {
1669 int exit_status;
Ingo Molnaraddc2782009-06-02 23:43:11 +02001670
Namhyung Kim45604712014-05-12 09:47:24 +09001671 if (!child_finished)
1672 kill(rec->evlist->workload.pid, SIGTERM);
1673
1674 wait(&exit_status);
1675
1676 if (err < 0)
1677 status = err;
1678 else if (WIFEXITED(exit_status))
1679 status = WEXITSTATUS(exit_status);
1680 else if (WIFSIGNALED(exit_status))
1681 signr = WTERMSIG(exit_status);
1682 } else
1683 status = err;
1684
Wang Nan4ea648a2016-07-14 08:34:47 +00001685 record__synthesize(rec, true);
Namhyung Kime3d59112015-01-29 17:06:44 +09001686 /* this will be recalculated during process_buildids() */
1687 rec->samples = 0;
1688
Wang Nanecfd7a92016-04-13 08:21:07 +00001689 if (!err) {
1690 if (!rec->timestamp_filename) {
1691 record__finish_output(rec);
1692 } else {
1693 fd = record__switch_output(rec, true);
1694 if (fd < 0) {
1695 status = fd;
1696 goto out_delete_session;
1697 }
1698 }
1699 }
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001700
Wang Nana0748652016-11-26 07:03:28 +00001701 perf_hooks__invoke_record_end();
1702
Namhyung Kime3d59112015-01-29 17:06:44 +09001703 if (!err && !quiet) {
1704 char samples[128];
Wang Nanecfd7a92016-04-13 08:21:07 +00001705 const char *postfix = rec->timestamp_filename ?
1706 ".<timestamp>" : "";
Namhyung Kime3d59112015-01-29 17:06:44 +09001707
Adrian Hunteref149c22015-04-09 18:53:45 +03001708 if (rec->samples && !rec->opts.full_auxtrace)
Namhyung Kime3d59112015-01-29 17:06:44 +09001709 scnprintf(samples, sizeof(samples),
1710 " (%" PRIu64 " samples)", rec->samples);
1711 else
1712 samples[0] = '\0';
1713
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001714 fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s",
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001715 perf_data__size(data) / 1024.0 / 1024.0,
Jiri Olsa2d4f2792019-02-21 10:41:30 +01001716 data->path, postfix, samples);
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001717 if (ratio) {
1718 fprintf(stderr, ", compressed (original %.3f MB, ratio is %.3f)",
1719 rec->session->bytes_transferred / 1024.0 / 1024.0,
1720 ratio);
1721 }
1722 fprintf(stderr, " ]\n");
Namhyung Kime3d59112015-01-29 17:06:44 +09001723 }
1724
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001725out_delete_session:
Alexey Budankov5d7f4112019-03-18 20:43:35 +03001726 zstd_fini(&session->zstd_data);
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001727 perf_session__delete(session);
Song Liu657ee552019-03-11 22:30:50 -07001728
1729 if (!opts->no_bpf_event)
1730 perf_evlist__stop_sb_thread(sb_evlist);
Namhyung Kim45604712014-05-12 09:47:24 +09001731 return status;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001732}
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001733
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001734static void callchain_debug(struct callchain_param *callchain)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001735{
Kan Liangaad2b212015-01-05 13:23:04 -05001736 static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
Jiri Olsaa601fdf2014-02-03 12:44:43 +01001737
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001738 pr_debug("callchain: type %s\n", str[callchain->record_mode]);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001739
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001740 if (callchain->record_mode == CALLCHAIN_DWARF)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001741 pr_debug("callchain: stack dump size %d\n",
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001742 callchain->dump_size);
1743}
1744
1745int record_opts__parse_callchain(struct record_opts *record,
1746 struct callchain_param *callchain,
1747 const char *arg, bool unset)
1748{
1749 int ret;
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001750 callchain->enabled = !unset;
1751
1752 /* --no-call-graph */
1753 if (unset) {
1754 callchain->record_mode = CALLCHAIN_NONE;
1755 pr_debug("callchain: disabled\n");
1756 return 0;
1757 }
1758
1759 ret = parse_callchain_record_opt(arg, callchain);
1760 if (!ret) {
1761 /* Enable data address sampling for DWARF unwind. */
1762 if (callchain->record_mode == CALLCHAIN_DWARF)
1763 record->sample_address = true;
1764 callchain_debug(callchain);
1765 }
1766
1767 return ret;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001768}
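/*
 * Example usage (illustrative): 'perf record --call-graph dwarf,4096'
 * selects CALLCHAIN_DWARF with a 4096-byte stack dump and, as above,
 * also enables sample_address.
 */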
1769
Kan Liangc421e802015-07-29 05:42:12 -04001770int record_parse_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001771 const char *arg,
1772 int unset)
1773{
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001774 return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
Jiri Olsa26d33022012-08-07 15:20:47 +02001775}
1776
Kan Liangc421e802015-07-29 05:42:12 -04001777int record_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001778 const char *arg __maybe_unused,
1779 int unset __maybe_unused)
1780{
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001781 struct callchain_param *callchain = opt->value;
Kan Liangc421e802015-07-29 05:42:12 -04001782
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001783 callchain->enabled = true;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001784
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001785 if (callchain->record_mode == CALLCHAIN_NONE)
1786 callchain->record_mode = CALLCHAIN_FP;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001787
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001788 callchain_debug(callchain);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001789 return 0;
1790}
1791
Jiri Olsaeb853e82014-02-03 12:44:42 +01001792static int perf_record_config(const char *var, const char *value, void *cb)
1793{
Namhyung Kim7a29c082015-12-15 10:49:56 +09001794 struct record *rec = cb;
1795
1796 if (!strcmp(var, "record.build-id")) {
1797 if (!strcmp(value, "cache"))
1798 rec->no_buildid_cache = false;
1799 else if (!strcmp(value, "no-cache"))
1800 rec->no_buildid_cache = true;
1801 else if (!strcmp(value, "skip"))
1802 rec->no_buildid = true;
1803 else
1804 return -1;
1805 return 0;
1806 }
Yisheng Xiecff17202018-03-12 19:25:57 +08001807 if (!strcmp(var, "record.call-graph")) {
1808 var = "call-graph.record-mode";
1809 return perf_default_config(var, value, cb);
1810 }
Alexey Budankov93f20c02018-11-06 12:07:19 +03001811#ifdef HAVE_AIO_SUPPORT
1812 if (!strcmp(var, "record.aio")) {
1813 rec->opts.nr_cblocks = strtol(value, NULL, 0);
1814 if (!rec->opts.nr_cblocks)
1815 rec->opts.nr_cblocks = nr_cblocks_default;
1816 }
1817#endif
Jiri Olsaeb853e82014-02-03 12:44:42 +01001818
Yisheng Xiecff17202018-03-12 19:25:57 +08001819 return 0;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001820}
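/*
 * Illustrative ~/.perfconfig snippet handled by the function above
 * (the 'aio' key only takes effect when built with HAVE_AIO_SUPPORT):
 *
 *	[record]
 *		build-id = skip
 *		call-graph = dwarf
 *		aio = 4
 */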
1821
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001822struct clockid_map {
1823 const char *name;
1824 int clockid;
1825};
1826
1827#define CLOCKID_MAP(n, c) \
1828 { .name = n, .clockid = (c), }
1829
1830#define CLOCKID_END { .name = NULL, }
1831
1832
1833/*
1834 * Add the missing ones; we need to build on many distros...
1835 */
1836#ifndef CLOCK_MONOTONIC_RAW
1837#define CLOCK_MONOTONIC_RAW 4
1838#endif
1839#ifndef CLOCK_BOOTTIME
1840#define CLOCK_BOOTTIME 7
1841#endif
1842#ifndef CLOCK_TAI
1843#define CLOCK_TAI 11
1844#endif
1845
1846static const struct clockid_map clockids[] = {
1847 /* available for all events, NMI safe */
1848 CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
1849 CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),
1850
1851 /* available for some events */
1852 CLOCKID_MAP("realtime", CLOCK_REALTIME),
1853 CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
1854 CLOCKID_MAP("tai", CLOCK_TAI),
1855
1856 /* available for the lazy */
1857 CLOCKID_MAP("mono", CLOCK_MONOTONIC),
1858 CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
1859 CLOCKID_MAP("real", CLOCK_REALTIME),
1860 CLOCKID_MAP("boot", CLOCK_BOOTTIME),
1861
1862 CLOCKID_END,
1863};
1864
Alexey Budankovcf790512018-10-09 17:36:24 +03001865static int get_clockid_res(clockid_t clk_id, u64 *res_ns)
1866{
1867 struct timespec res;
1868
1869 *res_ns = 0;
1870 if (!clock_getres(clk_id, &res))
1871 *res_ns = res.tv_nsec + res.tv_sec * NSEC_PER_SEC;
1872 else
1873 pr_warning("WARNING: Failed to determine specified clock resolution.\n");
1874
1875 return 0;
1876}
1877
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001878static int parse_clockid(const struct option *opt, const char *str, int unset)
1879{
1880 struct record_opts *opts = (struct record_opts *)opt->value;
1881 const struct clockid_map *cm;
1882 const char *ostr = str;
1883
1884 if (unset) {
1885 opts->use_clockid = 0;
1886 return 0;
1887 }
1888
1889 /* no arg passed */
1890 if (!str)
1891 return 0;
1892
1893 /* no setting it twice */
1894 if (opts->use_clockid)
1895 return -1;
1896
1897 opts->use_clockid = true;
1898
1899	/* if it's a number, we're done */
1900 if (sscanf(str, "%d", &opts->clockid) == 1)
Alexey Budankovcf790512018-10-09 17:36:24 +03001901 return get_clockid_res(opts->clockid, &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001902
1903 /* allow a "CLOCK_" prefix to the name */
1904 if (!strncasecmp(str, "CLOCK_", 6))
1905 str += 6;
1906
1907 for (cm = clockids; cm->name; cm++) {
1908 if (!strcasecmp(str, cm->name)) {
1909 opts->clockid = cm->clockid;
Alexey Budankovcf790512018-10-09 17:36:24 +03001910 return get_clockid_res(opts->clockid,
1911 &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001912 }
1913 }
1914
1915 opts->use_clockid = false;
1916 ui__warning("unknown clockid %s, check man page\n", ostr);
1917 return -1;
1918}
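/*
 * All of the following are accepted by parse_clockid() above
 * (illustrative):
 *
 *	perf record -k monotonic_raw ...	name lookup in clockids[]
 *	perf record -k CLOCK_BOOTTIME ...	"CLOCK_" prefix is stripped
 *	perf record -k 4 ...			raw clockid number
 */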
1919
Alexey Budankovf4fe11b2019-01-22 20:52:03 +03001920static int record__parse_affinity(const struct option *opt, const char *str, int unset)
1921{
1922 struct record_opts *opts = (struct record_opts *)opt->value;
1923
1924 if (unset || !str)
1925 return 0;
1926
1927 if (!strcasecmp(str, "node"))
1928 opts->affinity = PERF_AFFINITY_NODE;
1929 else if (!strcasecmp(str, "cpu"))
1930 opts->affinity = PERF_AFFINITY_CPU;
1931
1932 return 0;
1933}
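/*
 * e.g. 'perf record --affinity=node' (illustrative) makes the trace
 * reading thread follow the NUMA node of the mmap buffer it processes.
 */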
1934
Adrian Huntere9db1312015-04-09 18:53:46 +03001935static int record__parse_mmap_pages(const struct option *opt,
1936 const char *str,
1937 int unset __maybe_unused)
1938{
1939 struct record_opts *opts = opt->value;
1940 char *s, *p;
1941 unsigned int mmap_pages;
1942 int ret;
1943
1944 if (!str)
1945 return -EINVAL;
1946
1947 s = strdup(str);
1948 if (!s)
1949 return -ENOMEM;
1950
1951 p = strchr(s, ',');
1952 if (p)
1953 *p = '\0';
1954
1955 if (*s) {
1956 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
1957 if (ret)
1958 goto out_free;
1959 opts->mmap_pages = mmap_pages;
1960 }
1961
1962 if (!p) {
1963 ret = 0;
1964 goto out_free;
1965 }
1966
1967 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
1968 if (ret)
1969 goto out_free;
1970
1971 opts->auxtrace_mmap_pages = mmap_pages;
1972
1973out_free:
1974 free(s);
1975 return ret;
1976}
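/*
 * e.g. '-m 512,1024' (illustrative) requests 512 mmap data pages and
 * 1024 AUX area tracing mmap pages; '-m 512' sets only the former.
 */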
1977
Jiri Olsa0c582442017-01-09 10:51:59 +01001978static void switch_output_size_warn(struct record *rec)
1979{
1980 u64 wakeup_size = perf_evlist__mmap_size(rec->opts.mmap_pages);
1981 struct switch_output *s = &rec->switch_output;
1982
1983 wakeup_size /= 2;
1984
1985 if (s->size < wakeup_size) {
1986 char buf[100];
1987
1988 unit_number__scnprintf(buf, sizeof(buf), wakeup_size);
1989 pr_warning("WARNING: switch-output data size lower than "
1990			   "wakeup kernel buffer size (%s); "
1991			   "expect bigger perf.data sizes\n", buf);
1992 }
1993}
1994
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001995static int switch_output_setup(struct record *rec)
1996{
1997 struct switch_output *s = &rec->switch_output;
Jiri Olsadc0c6122017-01-09 10:51:58 +01001998 static struct parse_tag tags_size[] = {
1999 { .tag = 'B', .mult = 1 },
2000 { .tag = 'K', .mult = 1 << 10 },
2001 { .tag = 'M', .mult = 1 << 20 },
2002 { .tag = 'G', .mult = 1 << 30 },
2003 { .tag = 0 },
2004 };
Jiri Olsabfacbe32017-01-09 10:52:00 +01002005 static struct parse_tag tags_time[] = {
2006 { .tag = 's', .mult = 1 },
2007 { .tag = 'm', .mult = 60 },
2008 { .tag = 'h', .mult = 60*60 },
2009 { .tag = 'd', .mult = 60*60*24 },
2010 { .tag = 0 },
2011 };
Jiri Olsadc0c6122017-01-09 10:51:58 +01002012 unsigned long val;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002013
2014 if (!s->set)
2015 return 0;
2016
2017 if (!strcmp(s->str, "signal")) {
2018 s->signal = true;
2019 pr_debug("switch-output with SIGUSR2 signal\n");
Jiri Olsadc0c6122017-01-09 10:51:58 +01002020 goto enabled;
2021 }
2022
2023 val = parse_tag_value(s->str, tags_size);
2024 if (val != (unsigned long) -1) {
2025 s->size = val;
2026 pr_debug("switch-output with %s size threshold\n", s->str);
2027 goto enabled;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002028 }
2029
Jiri Olsabfacbe32017-01-09 10:52:00 +01002030 val = parse_tag_value(s->str, tags_time);
2031 if (val != (unsigned long) -1) {
2032 s->time = val;
2033 pr_debug("switch-output with %s time threshold (%lu seconds)\n",
2034 s->str, s->time);
2035 goto enabled;
2036 }
2037
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002038 return -1;
Jiri Olsadc0c6122017-01-09 10:51:58 +01002039
2040enabled:
2041 rec->timestamp_filename = true;
2042 s->enabled = true;
Jiri Olsa0c582442017-01-09 10:51:59 +01002043
2044 if (s->size && !rec->opts.no_buffering)
2045 switch_output_size_warn(rec);
2046
Jiri Olsadc0c6122017-01-09 10:51:58 +01002047 return 0;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002048}
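/*
 * Illustrative thresholds accepted by switch_output_setup() above:
 *
 *	--switch-output=signal	switch output on SIGUSR2
 *	--switch-output=2G	switch after 2 * (1 << 30) bytes
 *	--switch-output=30s	switch every 30 seconds
 */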
2049
Namhyung Kime5b2c202014-10-23 00:15:46 +09002050static const char * const __record_usage[] = {
Mike Galbraith9e0967532009-05-28 16:25:34 +02002051 "perf record [<options>] [<command>]",
2052 "perf record [<options>] -- <command> [<options>]",
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002053 NULL
2054};
Namhyung Kime5b2c202014-10-23 00:15:46 +09002055const char * const *record_usage = __record_usage;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002056
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002057/*
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03002058 * XXX Ideally this would be local to cmd_record() and passed to a record__new
2059 * because we need to have access to it in record__exit, which is called
2060 * after cmd_record() exits, but since record_options needs to be accessible to
2061 * builtin-script, leave it here.
2062 *
2063 * At least we don't touch it in all the other functions here directly.
2064 *
2065 * Just say no to tons of global variables, sigh.
2066 */
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03002067static struct record record = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002068 .opts = {
Andi Kleen8affc2b2014-07-31 14:45:04 +08002069 .sample_time = true,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002070 .mmap_pages = UINT_MAX,
2071 .user_freq = UINT_MAX,
2072 .user_interval = ULLONG_MAX,
Arnaldo Carvalho de Melo447a6012012-05-22 13:14:18 -03002073 .freq = 4000,
Namhyung Kimd1cb9fc2012-05-16 18:45:49 +09002074 .target = {
2075 .uses_mmap = true,
Adrian Hunter3aa59392013-11-15 15:52:29 +02002076 .default_per_cpu = true,
Namhyung Kimd1cb9fc2012-05-16 18:45:49 +09002077 },
Alexey Budankov470530b2019-03-18 20:40:26 +03002078 .mmap_flush = MMAP_FLUSH_DEFAULT,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002079 },
Namhyung Kime3d59112015-01-29 17:06:44 +09002080 .tool = {
2081 .sample = process_sample_event,
2082 .fork = perf_event__process_fork,
Adrian Huntercca84822015-08-19 17:29:21 +03002083 .exit = perf_event__process_exit,
Namhyung Kime3d59112015-01-29 17:06:44 +09002084 .comm = perf_event__process_comm,
Hari Bathinif3b36142017-03-08 02:11:43 +05302085 .namespaces = perf_event__process_namespaces,
Namhyung Kime3d59112015-01-29 17:06:44 +09002086 .mmap = perf_event__process_mmap,
2087 .mmap2 = perf_event__process_mmap2,
Adrian Huntercca84822015-08-19 17:29:21 +03002088 .ordered_events = true,
Namhyung Kime3d59112015-01-29 17:06:44 +09002089 },
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002090};
Frederic Weisbecker7865e812010-04-14 19:42:07 +02002091
Namhyung Kim76a26542015-10-22 23:28:32 +09002092const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
2093 "\n\t\t\t\tDefault: fp";
Arnaldo Carvalho de Melo61eaa3b2012-10-01 15:20:58 -03002094
Wang Nan0aab2132016-06-16 08:02:41 +00002095static bool dry_run;
2096
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002097/*
2098 * XXX Will stay a global variable until we fix builtin-script.c to stop messing
2099 * with it and switch to using the library functions in perf_evlist that came
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03002100 * from builtin-record.c, i.e. use record_opts,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002101 * perf_evlist__prepare_workload, etc. instead of fork+exec'ing 'perf record',
2102 * using pipes, etc.
2103 */
Jiri Olsaefd21302017-01-03 09:19:55 +01002104static struct option __record_options[] = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002105 OPT_CALLBACK('e', "event", &record.evlist, "event",
Thomas Gleixner86847b62009-06-06 12:24:17 +02002106 "event selector. use 'perf list' to list available events",
Jiri Olsaf120f9d2011-07-14 11:25:32 +02002107 parse_events_option),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002108 OPT_CALLBACK(0, "filter", &record.evlist, "filter",
Li Zefanc171b552009-10-15 11:22:07 +08002109 "event filter", parse_filter),
Wang Nan4ba1faa2015-07-10 07:36:10 +00002110 OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
2111 NULL, "don't record events from perf itself",
2112 exclude_perf),
Namhyung Kimbea03402012-04-26 14:15:15 +09002113 OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03002114 "record events on existing process id"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002115 OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03002116 "record events on existing thread id"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002117 OPT_INTEGER('r', "realtime", &record.realtime_prio,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002118 "collect data with this RT SCHED_FIFO priority"),
Arnaldo Carvalho de Melo509051e2014-01-14 17:52:14 -03002119 OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
Kirill Smelkovacac03f2011-01-12 17:59:36 +03002120 "collect data without buffering"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002121 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
Frederic Weisbeckerdaac07b2009-08-13 10:27:19 +02002122 "collect raw sample records from all opened counters"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002123 OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002124 "system-wide collection from all CPUs"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002125 OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
Stephane Eranianc45c6ea2010-05-28 12:00:01 +02002126 "list of cpus to monitor"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002127 OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
Jiri Olsa2d4f2792019-02-21 10:41:30 +01002128 OPT_STRING('o', "output", &record.data.path, "file",
Ingo Molnarabaff322009-06-02 22:59:57 +02002129 "output file name"),
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02002130 OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
2131 &record.opts.no_inherit_set,
2132 "child tasks do not inherit counters"),
Wang Nan4ea648a2016-07-14 08:34:47 +00002133 OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
2134 "synthesize non-sample events at the end of output"),
Wang Nan626a6b72016-07-14 08:34:45 +00002135 OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
Song Liu71184c62019-03-11 22:30:37 -07002136	OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "do not record bpf events"),
Arnaldo Carvalho de Melob09c2362018-03-01 14:52:50 -03002137 OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
2138 "Fail if the specified frequency can't be used"),
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03002139 OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
2140 "profile at this frequency",
2141 record__parse_freq),
Adrian Huntere9db1312015-04-09 18:53:46 +03002142 OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
2143 "number of mmap data pages and AUX area tracing mmap pages",
2144 record__parse_mmap_pages),
Alexey Budankov470530b2019-03-18 20:40:26 +03002145 OPT_CALLBACK(0, "mmap-flush", &record.opts, "number",
2146		     "Minimum number of bytes that is extracted from mmap data pages (default: 1)",
2147 record__mmap_flush_parse),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002148 OPT_BOOLEAN(0, "group", &record.opts.group,
Lin Ming43bece72011-08-17 18:42:07 +08002149 "put the counters into a counter group"),
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03002150 OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002151 NULL, "enables call-graph recording" ,
2152 &record_callchain_opt),
2153 OPT_CALLBACK(0, "call-graph", &record.opts,
Namhyung Kim76a26542015-10-22 23:28:32 +09002154 "record_mode[,record_size]", record_callchain_help,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002155 &record_parse_callchain_opt),
Ian Munsiec0555642010-04-13 18:37:33 +10002156 OPT_INCR('v', "verbose", &verbose,
Ingo Molnar3da297a2009-06-07 17:39:02 +02002157 "be more verbose (show counter open errors, etc)"),
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02002158 OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002159 OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02002160 "per thread counts"),
Peter Zijlstra56100322015-06-10 16:48:50 +02002161 OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
Kan Liang3b0a5da2017-08-29 13:11:08 -04002162 OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
2163 "Record the sample physical addresses"),
Jiri Olsab6f35ed2016-08-01 20:02:35 +02002164 OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
Adrian Hunter3abebc52015-07-06 14:51:01 +03002165 OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
2166 &record.opts.sample_time_set,
2167 "Record the sample timestamps"),
Jiri Olsaf290aa12018-02-01 09:38:11 +01002168 OPT_BOOLEAN_SET('P', "period", &record.opts.period, &record.opts.period_set,
2169 "Record the sample period"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002170 OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02002171 "don't sample"),
Wang Nand2db9a92016-01-25 09:56:19 +00002172 OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
2173 &record.no_buildid_cache_set,
2174 "do not update the buildid cache"),
2175 OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
2176 &record.no_buildid_set,
2177 "do not collect buildids in perf.data"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002178 OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
Stephane Eranian023695d2011-02-14 11:20:01 +02002179 "monitor event in cgroup name only",
2180 parse_cgroups),
Arnaldo Carvalho de Meloa6205a32014-01-14 17:58:12 -03002181 OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
Andi Kleen6619a532014-01-11 13:38:27 -08002182 "ms to wait before starting measurement after program start"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002183 OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
2184 "user to profile"),
Stephane Eraniana5aabda2012-03-08 23:47:45 +01002185
2186 OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
2187 "branch any", "sample any taken branches",
2188 parse_branch_stack),
2189
2190 OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
2191 "branch filter mask", "branch stack filter modes",
Roberto Agostino Vitillobdfebd82012-02-09 23:21:02 +01002192 parse_branch_stack),
Andi Kleen05484292013-01-24 16:10:29 +01002193 OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
2194 "sample by weight (on special events only)"),
Andi Kleen475eeab2013-09-20 07:40:43 -07002195 OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
2196 "sample transaction flags (special events only)"),
Adrian Hunter3aa59392013-11-15 15:52:29 +02002197 OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
2198 "use per-thread mmaps"),
Stephane Eranianbcc84ec2015-08-31 18:41:12 +02002199 OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
2200 "sample selected machine registers on interrupt,"
Kan Liangaeea9062019-05-14 13:19:32 -07002201 " use '-I?' to list register names", parse_intr_regs),
Andi Kleen84c41742017-09-05 10:00:28 -07002202 OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
2203 "sample selected machine registers on interrupt,"
Kan Liangaeea9062019-05-14 13:19:32 -07002204 " use '--user-regs=?' to list register names", parse_user_regs),
Andi Kleen85c273d2015-02-24 15:13:40 -08002205 OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
2206 "Record running/enabled time of read (:S) events"),
Peter Zijlstra814c8c32015-03-31 00:19:31 +02002207 OPT_CALLBACK('k', "clockid", &record.opts,
2208 "clockid", "clockid to use for events, see clock_gettime()",
2209 parse_clockid),
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002210 OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
2211 "opts", "AUX area tracing Snapshot Mode", ""),
Mark Drayton3fcb10e2018-12-04 12:34:20 -08002212 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
Kan Liang9d9cad72015-06-17 09:51:11 -04002213 "per thread proc mmap processing timeout in ms"),
Hari Bathinif3b36142017-03-08 02:11:43 +05302214 OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
2215 "Record namespaces events"),
Adrian Hunterb757bb02015-07-21 12:44:04 +03002216 OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
2217 "Record context switch events"),
Jiri Olsa85723882016-02-15 09:34:31 +01002218 OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
2219 "Configure all used events to run in kernel space.",
2220 PARSE_OPT_EXCLUSIVE),
2221 OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
2222 "Configure all used events to run in user space.",
2223 PARSE_OPT_EXCLUSIVE),
yuzhoujian53651b22019-05-30 14:29:22 +01002224 OPT_BOOLEAN(0, "kernel-callchains", &record.opts.kernel_callchains,
2225 "collect kernel callchains"),
2226 OPT_BOOLEAN(0, "user-callchains", &record.opts.user_callchains,
2227 "collect user callchains"),
Wang Nan71dc23262015-10-14 12:41:19 +00002228 OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
2229 "clang binary to use for compiling BPF scriptlets"),
2230 OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
2231 "options passed to clang when compiling BPF scriptlets"),
He Kuang7efe0e02015-12-14 10:39:23 +00002232 OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
2233 "file", "vmlinux pathname"),
Namhyung Kim61566812016-01-11 22:37:09 +09002234 OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
2235 "Record build-id of all DSOs regardless of hits"),
Wang Nanecfd7a92016-04-13 08:21:07 +00002236 OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
2237 "append timestamp to output filename"),
Jin Yao68588ba2017-12-08 21:13:42 +08002238 OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
2239 "Record timestamp boundary (time of first/last samples)"),
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002240 OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
Andi Kleenc38dab72019-03-14 15:49:56 -07002241 &record.switch_output.set, "signal or size[BKMG] or time[smhd]",
2242			  "Switch output when receiving SIGUSR2 (signal) or crossing a size or time threshold",
Jiri Olsadc0c6122017-01-09 10:51:58 +01002243 "signal"),
Andi Kleen03724b22019-03-14 15:49:55 -07002244 OPT_INTEGER(0, "switch-max-files", &record.switch_output.num_files,
2245 "Limit number of switch output generated files"),
Wang Nan0aab2132016-06-16 08:02:41 +00002246 OPT_BOOLEAN(0, "dry-run", &dry_run,
2247 "Parse options then exit"),
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002248#ifdef HAVE_AIO_SUPPORT
Alexey Budankov93f20c02018-11-06 12:07:19 +03002249 OPT_CALLBACK_OPTARG(0, "aio", &record.opts,
2250 &nr_cblocks_default, "n", "Use <n> control blocks in asynchronous trace writing mode (default: 1, max: 4)",
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002251 record__aio_parse),
2252#endif
Alexey Budankovf4fe11b2019-01-22 20:52:03 +03002253 OPT_CALLBACK(0, "affinity", &record.opts, "node|cpu",
2254 "Set affinity mask of trace reading thread to NUMA node cpu mask or cpu of processed mmap buffer",
2255 record__parse_affinity),
Alexey Budankov504c1ad2019-03-18 20:44:42 +03002256#ifdef HAVE_ZSTD_SUPPORT
2257 OPT_CALLBACK_OPTARG('z', "compression-level", &record.opts, &comp_level_default,
2258			    "n", "Compress records using the specified level (default: 1 - fastest compression, 22 - greatest compression)",
2259 record__parse_comp_level),
2260#endif
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002261 OPT_END()
2262};
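/*
 * A typical invocation exercising several of the options above
 * (illustrative): 'perf record -F 4000 -g --switch-output=1G -- ./workload'.
 */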
2263
Namhyung Kime5b2c202014-10-23 00:15:46 +09002264struct option *record_options = __record_options;
2265
Arnaldo Carvalho de Melob0ad8ea2017-03-27 11:47:20 -03002266int cmd_record(int argc, const char **argv)
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002267{
Adrian Hunteref149c22015-04-09 18:53:45 +03002268 int err;
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03002269 struct record *rec = &record;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002270 char errbuf[BUFSIZ];
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002271
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03002272 setlocale(LC_ALL, "");
2273
Wang Nan48e1cab2015-12-14 10:39:22 +00002274#ifndef HAVE_LIBBPF_SUPPORT
2275# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
2276 set_nobuild('\0', "clang-path", true);
2277 set_nobuild('\0', "clang-opt", true);
2278# undef set_nobuild
2279#endif
2280
He Kuang7efe0e02015-12-14 10:39:23 +00002281#ifndef HAVE_BPF_PROLOGUE
2282# if !defined (HAVE_DWARF_SUPPORT)
2283# define REASON "NO_DWARF=1"
2284# elif !defined (HAVE_LIBBPF_SUPPORT)
2285# define REASON "NO_LIBBPF=1"
2286# else
2287# define REASON "this architecture doesn't support BPF prologue"
2288# endif
2289# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
2290 set_nobuild('\0', "vmlinux", true);
2291# undef set_nobuild
2292# undef REASON
2293#endif
2294
Alexey Budankov9d2ed642019-01-22 20:47:43 +03002295 CPU_ZERO(&rec->affinity_mask);
2296 rec->opts.affinity = PERF_AFFINITY_SYS;
2297
Jiri Olsa0f98b112019-07-21 13:23:55 +02002298 rec->evlist = evlist__new();
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03002299 if (rec->evlist == NULL)
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -02002300 return -ENOMEM;
2301
Arnaldo Carvalho de Meloecc4c562017-01-24 13:44:10 -03002302 err = perf_config(perf_record_config, rec);
2303 if (err)
2304 return err;
Jiri Olsaeb853e82014-02-03 12:44:42 +01002305
Tom Zanussibca647a2010-11-10 08:11:30 -06002306 argc = parse_options(argc, argv, record_options, record_usage,
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02002307 PARSE_OPT_STOP_AT_NON_OPTION);
Namhyung Kim68ba3232017-02-17 17:17:42 +09002308 if (quiet)
2309 perf_quiet_option();
Jiri Olsa483635a2017-02-17 18:00:18 +01002310
2311 /* Make system wide (-a) the default target. */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002312 if (!argc && target__none(&rec->opts.target))
Jiri Olsa483635a2017-02-17 18:00:18 +01002313 rec->opts.target.system_wide = true;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002314
Namhyung Kimbea03402012-04-26 14:15:15 +09002315 if (nr_cgroups && !rec->opts.target.system_wide) {
Namhyung Kimc7118362015-10-25 00:49:27 +09002316 usage_with_options_msg(record_usage, record_options,
2317 "cgroup monitoring only available in system-wide mode");
2318
Stephane Eranian023695d2011-02-14 11:20:01 +02002319 }
Alexey Budankov504c1ad2019-03-18 20:44:42 +03002320
2321 if (rec->opts.comp_level != 0) {
2322 pr_debug("Compression enabled, disabling build id collection at the end of the session.\n");
2323 rec->no_buildid = true;
2324 }
2325
Adrian Hunterb757bb02015-07-21 12:44:04 +03002326 if (rec->opts.record_switch_events &&
2327 !perf_can_record_switch_events()) {
Namhyung Kimc7118362015-10-25 00:49:27 +09002328 ui__error("kernel does not support recording context switch events\n");
2329 parse_options_usage(record_usage, record_options, "switch-events", 0);
2330 return -EINVAL;
Adrian Hunterb757bb02015-07-21 12:44:04 +03002331 }
Stephane Eranian023695d2011-02-14 11:20:01 +02002332
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002333 if (switch_output_setup(rec)) {
2334 parse_options_usage(record_usage, record_options, "switch-output", 0);
2335 return -EINVAL;
2336 }
2337
Jiri Olsabfacbe32017-01-09 10:52:00 +01002338 if (rec->switch_output.time) {
2339 signal(SIGALRM, alarm_sig_handler);
2340 alarm(rec->switch_output.time);
2341 }
2342
Andi Kleen03724b22019-03-14 15:49:55 -07002343 if (rec->switch_output.num_files) {
2344 rec->switch_output.filenames = calloc(sizeof(char *),
2345 rec->switch_output.num_files);
2346 if (!rec->switch_output.filenames)
2347 return -EINVAL;
2348 }
2349
Adrian Hunter1b36c032016-09-23 17:38:39 +03002350 /*
2351 * Allow aliases to facilitate the lookup of symbols for address
2352 * filters. Refer to auxtrace_parse_filters().
2353 */
2354 symbol_conf.allow_aliases = true;
2355
2356 symbol__init(NULL);
2357
Adrian Hunter4b5ea3b2018-03-06 11:13:12 +02002358 err = record__auxtrace_init(rec);
Adrian Hunter1b36c032016-09-23 17:38:39 +03002359 if (err)
2360 goto out;
2361
Wang Nan0aab2132016-06-16 08:02:41 +00002362 if (dry_run)
Adrian Hunter5c01ad602016-09-23 17:38:37 +03002363 goto out;
Wang Nan0aab2132016-06-16 08:02:41 +00002364
Wang Nand7888572016-04-08 15:07:24 +00002365 err = bpf__setup_stdout(rec->evlist);
2366 if (err) {
2367 bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
2368 pr_err("ERROR: Setup BPF stdout failed: %s\n",
2369 errbuf);
Adrian Hunter5c01ad602016-09-23 17:38:37 +03002370 goto out;
Wang Nand7888572016-04-08 15:07:24 +00002371 }
2372
Adrian Hunteref149c22015-04-09 18:53:45 +03002373 err = -ENOMEM;
2374
Arnaldo Carvalho de Melo6c443952017-11-14 11:03:19 -03002375 if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(rec->evlist))
Arnaldo Carvalho de Melo646aaea2011-05-27 11:00:41 -03002376 pr_warning(
2377"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
Igor Lubashevd06e5fa2019-08-26 21:39:16 -04002378"check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
Arnaldo Carvalho de Melo646aaea2011-05-27 11:00:41 -03002379"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
2380"file is not found in the buildid cache or in the vmlinux path.\n\n"
2381"Samples in kernel modules won't be resolved at all.\n\n"
2382"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
2383"even with a suitable vmlinux or kallsyms file.\n\n");
Arnaldo Carvalho de Meloec80fde2011-05-26 09:53:51 -03002384
Wang Nan0c1d46a2016-04-20 18:59:52 +00002385 if (rec->no_buildid_cache || rec->no_buildid) {
Stephane Eraniana1ac1d32010-06-17 11:39:01 +02002386 disable_buildid_cache();
Jiri Olsadc0c6122017-01-09 10:51:58 +01002387 } else if (rec->switch_output.enabled) {
Wang Nan0c1d46a2016-04-20 18:59:52 +00002388 /*
2389 * In 'perf record --switch-output', disable buildid
2390 * generation by default to reduce data file switching
2391		 * overhead. Still generate buildids if they are
2392		 * explicitly required using
2393 *
Jiri Olsa60437ac2017-01-03 09:19:56 +01002394 * perf record --switch-output --no-no-buildid \
Wang Nan0c1d46a2016-04-20 18:59:52 +00002395 * --no-no-buildid-cache
2396 *
2397 * Following code equals to:
2398 *
2399 * if ((rec->no_buildid || !rec->no_buildid_set) &&
2400 * (rec->no_buildid_cache || !rec->no_buildid_cache_set))
2401 * disable_buildid_cache();
2402 */
2403 bool disable = true;
2404
2405 if (rec->no_buildid_set && !rec->no_buildid)
2406 disable = false;
2407 if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
2408 disable = false;
2409 if (disable) {
2410 rec->no_buildid = true;
2411 rec->no_buildid_cache = true;
2412 disable_buildid_cache();
2413 }
2414 }
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02002415
Wang Nan4ea648a2016-07-14 08:34:47 +00002416 if (record.opts.overwrite)
2417 record.opts.tail_synthesize = true;
2418
Jiri Olsa6484d2f2019-07-21 13:24:28 +02002419 if (rec->evlist->core.nr_entries == 0 &&
Arnaldo Carvalho de Melo4b4cd502017-07-03 13:26:32 -03002420 __perf_evlist__add_default(rec->evlist, !record.opts.no_samples) < 0) {
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02002421 pr_err("Not enough memory for event selector list\n");
Adrian Hunter394c01e2016-09-23 17:38:36 +03002422 goto out;
Peter Zijlstrabbd36e52009-06-11 23:11:50 +02002423 }
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002424
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02002425 if (rec->opts.target.tid && !rec->opts.no_inherit_set)
2426 rec->opts.no_inherit = true;
2427
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002428 err = target__validate(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002429 if (err) {
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002430 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Jiri Olsac3dec272018-02-06 19:17:58 +01002431 ui__warning("%s\n", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002432 }
Namhyung Kim4bd0f2d2012-04-26 14:15:18 +09002433
	err = target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;

		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out;
	}

	/* Enable ignoring missing threads when the -u or -p option is given. */
	rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;

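	/* Build the cpu and thread maps described by the target specification. */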
	err = -ENOMEM;
	if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
	if (err)
		goto out;

	/*
	 * Take all buildids when the file contains AUX area tracing data,
	 * because decoding the trace to find out which ones are actually
	 * needed would take too long.
	 */
	if (rec->opts.full_auxtrace)
		rec->buildid_all = true;

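	/*
	 * Resolve the default sampling frequency/period and check it
	 * against the system's limits.
	 */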
	if (record_opts__config(&rec->opts)) {
		err = -EINVAL;
		goto out;
	}

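	/* --aio: cap the number of in-flight asynchronous write requests. */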
	if (rec->opts.nr_cblocks > nr_cblocks_max)
		rec->opts.nr_cblocks = nr_cblocks_max;
	pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks);

	pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
	pr_debug("mmap flush: %d\n", rec->opts.mmap_flush);

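	/* -z/--compression-level: cap the level at what this build supports. */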
	if (rec->opts.comp_level > comp_level_max)
		rec->opts.comp_level = comp_level_max;
	pr_debug("comp level: %d\n", rec->opts.comp_level);

	err = __cmd_record(&record, argc, argv);
out:
	evlist__delete(rec->evlist);
	symbol__exit();
	auxtrace_record__free(rec->itr);
	return err;
}

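/*
 * SIGUSR2 handler: depending on which mode is active, take an AUX area
 * snapshot and/or request an output file switch (--switch-output).
 */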
static void snapshot_sig_handler(int sig __maybe_unused)
{
	struct record *rec = &record;

	if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
		trigger_hit(&auxtrace_snapshot_trigger);
		auxtrace_record__snapshot_started = 1;
		if (auxtrace_record__snapshot_start(record.itr))
			trigger_error(&auxtrace_snapshot_trigger);
	}

	if (switch_output_signal(rec))
		trigger_hit(&switch_output_trigger);
}

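/*
 * SIGALRM handler, armed when --switch-output is given a time interval:
 * request an output file switch when the timer fires.
 */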
static void alarm_sig_handler(int sig __maybe_unused)
{
	struct record *rec = &record;

	if (switch_output_time(rec))
		trigger_hit(&switch_output_trigger);
}