// SPDX-License-Identifier: GPL-2.0
/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "util/build-id.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/config.h"

#include "util/callchain.h"
#include "util/cgroup.h"
#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/mmap.h"
#include "util/target.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/record.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"
#include "util/perf_regs.h"
#include "util/auxtrace.h"
#include "util/tsc.h"
#include "util/parse-branch-options.h"
#include "util/parse-regs-options.h"
#include "util/perf_api_probe.h"
#include "util/llvm-utils.h"
#include "util/bpf-loader.h"
#include "util/trigger.h"
#include "util/perf-hooks.h"
#include "util/cpu-set-sched.h"
#include "util/synthetic-events.h"
#include "util/time-utils.h"
#include "util/units.h"
#include "util/bpf-event.h"
#include "util/util.h"
#include "asm/bug.h"
#include "perf.h"

#include <errno.h>
#include <inttypes.h>
#include <locale.h>
#include <poll.h>
#include <pthread.h>
#include <unistd.h>
#include <sched.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
#include <linux/bitmap.h>

struct switch_output {
	bool		 enabled;
	bool		 signal;
	unsigned long	 size;
	unsigned long	 time;
	const char	*str;
	bool		 set;
	char		 **filenames;
	int		 num_files;
	int		 cur_file;
};

struct record {
	struct perf_tool	tool;
	struct record_opts	opts;
	u64			bytes_written;
	struct perf_data	data;
	struct auxtrace_record	*itr;
	struct evlist		*evlist;
	struct perf_session	*session;
	struct evlist		*sb_evlist;
	pthread_t		thread_id;
	int			realtime_prio;
	bool			switch_output_event_set;
	bool			no_buildid;
	bool			no_buildid_set;
	bool			no_buildid_cache;
	bool			no_buildid_cache_set;
	bool			buildid_all;
	bool			timestamp_filename;
	bool			timestamp_boundary;
	struct switch_output	switch_output;
	unsigned long long	samples;
	struct mmap_cpu_mask	affinity_mask;
	unsigned long		output_max_size;	/* = 0: unlimited */
};

static volatile int done;

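/*
 * Both triggers follow the ready -> hit life cycle from util/trigger.h:
 * auxtrace_snapshot_trigger fires (typically on SIGUSR2 in snapshot mode)
 * to dump an AUX area snapshot, and switch_output_trigger fires on signal,
 * size or time to rotate perf.data into a timestamped file.
 */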
static volatile int auxtrace_record__snapshot_started;
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
static DEFINE_TRIGGER(switch_output_trigger);

static const char *affinity_tags[PERF_AFFINITY_MAX] = {
	"SYS", "NODE", "CPU"
};

static bool switch_output_signal(struct record *rec)
{
	return rec->switch_output.signal &&
	       trigger_is_ready(&switch_output_trigger);
}

static bool switch_output_size(struct record *rec)
{
	return rec->switch_output.size &&
	       trigger_is_ready(&switch_output_trigger) &&
	       (rec->bytes_written >= rec->switch_output.size);
}

static bool switch_output_time(struct record *rec)
{
	return rec->switch_output.time &&
	       trigger_is_ready(&switch_output_trigger);
}

static bool record__output_max_size_exceeded(struct record *rec)
{
	return rec->output_max_size &&
	       (rec->bytes_written >= rec->output_max_size);
}

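/*
 * A minimal usage sketch for the predicates above (option spellings as in
 * perf-record(1)):
 *
 *   perf record --switch-output=1G ...   # rotate output every ~1GB written
 *   perf record --max-size=500M ...      # end the session at ~500MB
 */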
static int record__write(struct record *rec, struct mmap *map __maybe_unused,
			 void *bf, size_t size)
{
	struct perf_data_file *file = &rec->session->data->file;

	if (perf_data_file__write(file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	rec->bytes_written += size;

	if (record__output_max_size_exceeded(rec) && !done) {
		fprintf(stderr, "[ perf record: perf size limit reached (%" PRIu64 " KB),"
				" stopping session ]\n",
				rec->bytes_written >> 10);
		done = 1;
	}

	if (switch_output_size(rec))
		trigger_hit(&switch_output_trigger);

	return 0;
}

static int record__aio_enabled(struct record *rec);
static int record__comp_enabled(struct record *rec);
static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
			    void *src, size_t src_size);

#ifdef HAVE_AIO_SUPPORT
static int record__aio_write(struct aiocb *cblock, int trace_fd,
			     void *buf, size_t size, off_t off)
{
	int rc;

	cblock->aio_fildes = trace_fd;
	cblock->aio_buf = buf;
	cblock->aio_nbytes = size;
	cblock->aio_offset = off;
	cblock->aio_sigevent.sigev_notify = SIGEV_NONE;

	do {
		rc = aio_write(cblock);
		if (rc == 0) {
			break;
		} else if (errno != EAGAIN) {
			cblock->aio_fildes = -1;
			pr_err("failed to queue perf data, error: %m\n");
			break;
		}
	} while (1);

	return rc;
}

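/*
 * Reap one queued aio write. A short write (the kernel accepted fewer than
 * aio_nbytes bytes) is restarted for the remainder; only once the whole
 * chunk has landed is the mmap reference dropped and 1 returned.
 */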
static int record__aio_complete(struct mmap *md, struct aiocb *cblock)
{
	void *rem_buf;
	off_t rem_off;
	size_t rem_size;
	int rc, aio_errno;
	ssize_t aio_ret, written;

	aio_errno = aio_error(cblock);
	if (aio_errno == EINPROGRESS)
		return 0;

	written = aio_ret = aio_return(cblock);
	if (aio_ret < 0) {
		if (aio_errno != EINTR)
			pr_err("failed to write perf data, error: %m\n");
		written = 0;
	}

	rem_size = cblock->aio_nbytes - written;

	if (rem_size == 0) {
		cblock->aio_fildes = -1;
		/*
		 * md->refcount is incremented in record__aio_pushfn() for
		 * every aio write request started in record__aio_push() so
		 * decrement it because the request is now complete.
		 */
		perf_mmap__put(&md->core);
		rc = 1;
	} else {
		/*
		 * aio write request may require restart with the
		 * remainder if the kernel didn't write the whole
		 * chunk at once.
		 */
		rem_off = cblock->aio_offset + written;
		rem_buf = (void *)(cblock->aio_buf + written);
		record__aio_write(cblock, cblock->aio_fildes,
				  rem_buf, rem_size, rem_off);
		rc = 0;
	}

	return rc;
}

static int record__aio_sync(struct mmap *md, bool sync_all)
{
	struct aiocb **aiocb = md->aio.aiocb;
	struct aiocb *cblocks = md->aio.cblocks;
	struct timespec timeout = { 0, 1000 * 1000 * 1 }; /* 1ms */
	int i, do_suspend;

	do {
		do_suspend = 0;
		for (i = 0; i < md->aio.nr_cblocks; ++i) {
			if (cblocks[i].aio_fildes == -1 || record__aio_complete(md, &cblocks[i])) {
				if (sync_all)
					aiocb[i] = NULL;
				else
					return i;
			} else {
				/*
				 * Started aio write is not complete yet
				 * so it has to be waited on before the
				 * next allocation.
				 */
				aiocb[i] = &cblocks[i];
				do_suspend = 1;
			}
		}
		if (!do_suspend)
			return -1;

		while (aio_suspend((const struct aiocb **)aiocb, md->aio.nr_cblocks, &timeout)) {
			if (!(errno == EAGAIN || errno == EINTR))
				pr_err("failed to sync perf data, error: %m\n");
		}
	} while (1);
}

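/*
 * Context handed from record__aio_push() to record__aio_pushfn(): data
 * points at the free map->aio.data[] buffer that the chunk is staged
 * (and optionally compressed) into before the asynchronous write starts.
 */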
struct record_aio {
	struct record	*rec;
	void		*data;
	size_t		size;
};

static int record__aio_pushfn(struct mmap *map, void *to, void *buf, size_t size)
{
	struct record_aio *aio = to;

	/*
	 * map->core.base data pointed to by buf is copied into a free
	 * map->aio.data[] buffer to release space in the kernel buffer as
	 * fast as possible, calling perf_mmap__consume() from the
	 * perf_mmap__push() function.
	 *
	 * That lets the kernel proceed with storing more profiling data into
	 * the kernel buffer earlier than other per-cpu kernel buffers are handled.
	 *
	 * Copying can be done in two steps in case the chunk of profiling data
	 * crosses the upper bound of the kernel buffer. In this case we first move
	 * part of data from map->start till the upper bound and then the remainder
	 * from the beginning of the kernel buffer till the end of the data chunk.
	 */

	if (record__comp_enabled(aio->rec)) {
		size = zstd_compress(aio->rec->session, aio->data + aio->size,
				     mmap__mmap_len(map) - aio->size,
				     buf, size);
	} else {
		memcpy(aio->data + aio->size, buf, size);
	}

	if (!aio->size) {
		/*
		 * Increment map->refcount to guard map->aio.data[] buffer
		 * from premature deallocation because map object can be
		 * released earlier than aio write request started on
		 * map->aio.data[] buffer is complete.
		 *
		 * perf_mmap__put() is done at record__aio_complete()
		 * after started aio request completion or at record__aio_push()
		 * if the request failed to start.
		 */
		perf_mmap__get(&map->core);
	}

	aio->size += size;

	return size;
}

static int record__aio_push(struct record *rec, struct mmap *map, off_t *off)
{
	int ret, idx;
	int trace_fd = rec->session->data->file.fd;
	struct record_aio aio = { .rec = rec, .size = 0 };

	/*
	 * Call record__aio_sync() to wait till map->aio.data[] buffer
	 * becomes available after previous aio write operation.
	 */

	idx = record__aio_sync(map, false);
	aio.data = map->aio.data[idx];
	ret = perf_mmap__push(map, &aio, record__aio_pushfn);
	if (ret != 0) /* ret > 0 - no data, ret < 0 - error */
		return ret;

	rec->samples++;
	ret = record__aio_write(&(map->aio.cblocks[idx]), trace_fd, aio.data, aio.size, *off);
	if (!ret) {
		*off += aio.size;
		rec->bytes_written += aio.size;
		if (switch_output_size(rec))
			trigger_hit(&switch_output_trigger);
	} else {
		/*
		 * Decrement map->refcount incremented in record__aio_pushfn()
		 * back if record__aio_write() operation failed to start, otherwise
		 * map->refcount is decremented in record__aio_complete() after
		 * aio write operation finishes successfully.
		 */
		perf_mmap__put(&map->core);
	}

	return ret;
}

static off_t record__aio_get_pos(int trace_fd)
{
	return lseek(trace_fd, 0, SEEK_CUR);
}

static void record__aio_set_pos(int trace_fd, off_t pos)
{
	lseek(trace_fd, pos, SEEK_SET);
}

static void record__aio_mmap_read_sync(struct record *rec)
{
	int i;
	struct evlist *evlist = rec->evlist;
	struct mmap *maps = evlist->mmap;

	if (!record__aio_enabled(rec))
		return;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		struct mmap *map = &maps[i];

		if (map->core.base)
			record__aio_sync(map, true);
	}
}

static int nr_cblocks_default = 1;
static int nr_cblocks_max = 4;

static int record__aio_parse(const struct option *opt,
			     const char *str,
			     int unset)
{
	struct record_opts *opts = (struct record_opts *)opt->value;

	if (unset) {
		opts->nr_cblocks = 0;
	} else {
		if (str)
			opts->nr_cblocks = strtol(str, NULL, 0);
		if (!opts->nr_cblocks)
			opts->nr_cblocks = nr_cblocks_default;
	}

	return 0;
}
#else /* HAVE_AIO_SUPPORT */
static int nr_cblocks_max = 0;

static int record__aio_push(struct record *rec __maybe_unused, struct mmap *map __maybe_unused,
			    off_t *off __maybe_unused)
{
	return -1;
}

static off_t record__aio_get_pos(int trace_fd __maybe_unused)
{
	return -1;
}

static void record__aio_set_pos(int trace_fd __maybe_unused, off_t pos __maybe_unused)
{
}

static void record__aio_mmap_read_sync(struct record *rec __maybe_unused)
{
}
#endif

static int record__aio_enabled(struct record *rec)
{
	return rec->opts.nr_cblocks > 0;
}

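/*
 * Example invocations (assuming a libc with POSIX AIO, i.e. builds where
 * HAVE_AIO_SUPPORT is set):
 *
 *   perf record --aio ...     # nr_cblocks_default (1) control block per mmap
 *   perf record --aio=4 ...   # up to nr_cblocks_max queued writes per mmap
 */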
#define MMAP_FLUSH_DEFAULT 1
static int record__mmap_flush_parse(const struct option *opt,
				    const char *str,
				    int unset)
{
	int flush_max;
	struct record_opts *opts = (struct record_opts *)opt->value;
	static struct parse_tag tags[] = {
			{ .tag  = 'B', .mult = 1       },
			{ .tag  = 'K', .mult = 1 << 10 },
			{ .tag  = 'M', .mult = 1 << 20 },
			{ .tag  = 'G', .mult = 1 << 30 },
			{ .tag  = 0 },
	};

	if (unset)
		return 0;

	if (str) {
		opts->mmap_flush = parse_tag_value(str, tags);
		if (opts->mmap_flush == (int)-1)
			opts->mmap_flush = strtol(str, NULL, 0);
	}

	if (!opts->mmap_flush)
		opts->mmap_flush = MMAP_FLUSH_DEFAULT;

	flush_max = evlist__mmap_size(opts->mmap_pages);
	flush_max /= 4;
	if (opts->mmap_flush > flush_max)
		opts->mmap_flush = flush_max;

	return 0;
}

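/*
 * Example: accumulate at least 1MB (but never more than a quarter of the
 * mmap size, the cap computed above) before flushing a ring buffer:
 *
 *   perf record --mmap-flush=1M ...
 */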
#ifdef HAVE_ZSTD_SUPPORT
static unsigned int comp_level_default = 1;

static int record__parse_comp_level(const struct option *opt, const char *str, int unset)
{
	struct record_opts *opts = opt->value;

	if (unset) {
		opts->comp_level = 0;
	} else {
		if (str)
			opts->comp_level = strtol(str, NULL, 0);
		if (!opts->comp_level)
			opts->comp_level = comp_level_default;
	}

	return 0;
}
#endif
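/*
 * Example (effective only in builds with HAVE_ZSTD_SUPPORT):
 *
 *   perf record -z ...    # Zstd level 1, i.e. comp_level_default
 *   perf record -z 3 ...  # explicit level; comp_level_max below is the
 *                         # upper bound accepted when options are validated
 */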
static unsigned int comp_level_max = 22;

static int record__comp_enabled(struct record *rec)
{
	return rec->opts.comp_level > 0;
}

static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, NULL, event, event->header.size);
}

static int process_locked_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	static pthread_mutex_t synth_lock = PTHREAD_MUTEX_INITIALIZER;
	int ret;

	pthread_mutex_lock(&synth_lock);
	ret = process_synthesized_event(tool, event, sample, machine);
	pthread_mutex_unlock(&synth_lock);
	return ret;
}

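/*
 * The locked variant above is used when synthesis runs on several threads
 * (--num-thread-synthesize), since record__write() itself does not
 * serialize concurrent writers.
 */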
static int record__pushfn(struct mmap *map, void *to, void *bf, size_t size)
{
	struct record *rec = to;

	if (record__comp_enabled(rec)) {
		size = zstd_compress(rec->session, map->data, mmap__mmap_len(map), bf, size);
		bf   = map->data;
	}

	rec->samples++;
	return record__write(rec, map, bf, size);
}

static volatile int signr = -1;
static volatile int child_finished;

static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
}

static void sigsegv_handler(int sig)
{
	perf_hooks__recover();
	sighandler_dump_stack(sig);
}

static void record__sig_exit(void)
{
	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	raise(signr);
}

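/*
 * AUX area tracing support (e.g. Intel PT). In snapshot mode data is only
 * copied out of the AUX ring buffer when a trigger fires; the stubs in the
 * #else branch keep the main record loop free of #ifdefs.
 */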
#ifdef HAVE_AUXTRACE_SUPPORT

static int record__process_auxtrace(struct perf_tool *tool,
				    struct mmap *map,
				    union perf_event *event, void *data1,
				    size_t len1, void *data2, size_t len2)
{
	struct record *rec = container_of(tool, struct record, tool);
	struct perf_data *data = &rec->data;
	size_t padding;
	u8 pad[8] = {0};

	if (!perf_data__is_pipe(data) && perf_data__is_single_file(data)) {
		off_t file_offset;
		int fd = perf_data__fd(data);
		int err;

		file_offset = lseek(fd, 0, SEEK_CUR);
		if (file_offset == -1)
			return -1;
		err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
						     event, file_offset);
		if (err)
			return err;
	}

	/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
	padding = (len1 + len2) & 7;
	if (padding)
		padding = 8 - padding;

	record__write(rec, map, event, event->header.size);
	record__write(rec, map, data1, len1);
	if (len2)
		record__write(rec, map, data2, len2);
	record__write(rec, map, &pad, padding);

	return 0;
}

static int record__auxtrace_mmap_read(struct record *rec,
				      struct mmap *map)
{
	int ret;

	ret = auxtrace_mmap__read(map, rec->itr, &rec->tool,
				  record__process_auxtrace);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_mmap_read_snapshot(struct record *rec,
					       struct mmap *map)
{
	int ret;

	ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool,
					   record__process_auxtrace,
					   rec->opts.auxtrace_snapshot_size);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_read_snapshot_all(struct record *rec)
{
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->core.nr_mmaps; i++) {
		struct mmap *map = &rec->evlist->mmap[i];

		if (!map->auxtrace_mmap.base)
			continue;

		if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) {
			rc = -1;
			goto out;
		}
	}
out:
	return rc;
}

static void record__read_auxtrace_snapshot(struct record *rec, bool on_exit)
{
	pr_debug("Recording AUX area tracing snapshot\n");
	if (record__auxtrace_read_snapshot_all(rec) < 0) {
		trigger_error(&auxtrace_snapshot_trigger);
	} else {
		if (auxtrace_record__snapshot_finish(rec->itr, on_exit))
			trigger_error(&auxtrace_snapshot_trigger);
		else
			trigger_ready(&auxtrace_snapshot_trigger);
	}
}

static int record__auxtrace_snapshot_exit(struct record *rec)
{
	if (trigger_is_error(&auxtrace_snapshot_trigger))
		return 0;

	if (!auxtrace_record__snapshot_started &&
	    auxtrace_record__snapshot_start(rec->itr))
		return -1;

	record__read_auxtrace_snapshot(rec, true);
	if (trigger_is_error(&auxtrace_snapshot_trigger))
		return -1;

	return 0;
}

static int record__auxtrace_init(struct record *rec)
{
	int err;

	if (!rec->itr) {
		rec->itr = auxtrace_record__init(rec->evlist, &err);
		if (err)
			return err;
	}

	err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
					      rec->opts.auxtrace_snapshot_opts);
	if (err)
		return err;

	err = auxtrace_parse_sample_options(rec->itr, rec->evlist, &rec->opts,
					    rec->opts.auxtrace_sample_opts);
	if (err)
		return err;

	return auxtrace_parse_filters(rec->evlist);
}

#else

static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
			       struct mmap *map __maybe_unused)
{
	return 0;
}

static inline
void record__read_auxtrace_snapshot(struct record *rec __maybe_unused,
				    bool on_exit __maybe_unused)
{
}

static inline
int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
{
	return 0;
}

static inline
int record__auxtrace_snapshot_exit(struct record *rec __maybe_unused)
{
	return 0;
}

static int record__auxtrace_init(struct record *rec __maybe_unused)
{
	return 0;
}

#endif

static bool record__kcore_readable(struct machine *machine)
{
	char kcore[PATH_MAX];
	int fd;

	scnprintf(kcore, sizeof(kcore), "%s/proc/kcore", machine->root_dir);

	fd = open(kcore, O_RDONLY);
	if (fd < 0)
		return false;

	close(fd);

	return true;
}

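/*
 * Copy /proc/kcore (plus kallsyms and modules, via kcore_copy()) into the
 * perf.data directory so the exact kernel text being profiled can be
 * revisited later, e.g. when decoding AUX area traces.
 */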
static int record__kcore_copy(struct machine *machine, struct perf_data *data)
{
	char from_dir[PATH_MAX];
	char kcore_dir[PATH_MAX];
	int ret;

	snprintf(from_dir, sizeof(from_dir), "%s/proc", machine->root_dir);

	ret = perf_data__make_kcore_dir(data, kcore_dir, sizeof(kcore_dir));
	if (ret)
		return ret;

	return kcore_copy(from_dir, kcore_dir);
}

static int record__mmap_evlist(struct record *rec,
			       struct evlist *evlist)
{
	struct record_opts *opts = &rec->opts;
	bool auxtrace_overwrite = opts->auxtrace_snapshot_mode ||
				  opts->auxtrace_sample_mode;
	char msg[512];

	if (opts->affinity != PERF_AFFINITY_SYS)
		cpu__setup_cpunode_map();

	if (evlist__mmap_ex(evlist, opts->mmap_pages,
				 opts->auxtrace_mmap_pages,
				 auxtrace_overwrite,
				 opts->nr_cblocks, opts->affinity,
				 opts->mmap_flush, opts->comp_level) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u,%u)\n",
			       opts->mmap_pages, opts->auxtrace_mmap_pages);
			return -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno,
				str_error_r(errno, msg, sizeof(msg)));
			if (errno)
				return -errno;
			else
				return -EINVAL;
		}
	}
	return 0;
}

static int record__mmap(struct record *rec)
{
	return record__mmap_evlist(rec, rec->evlist);
}

static int record__open(struct record *rec)
{
	char msg[BUFSIZ];
	struct evsel *pos;
	struct evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	int rc = 0;

	/*
	 * For initial_delay we need to add a dummy event so that we can track
	 * PERF_RECORD_MMAP while we wait for the initial delay to enable the
	 * real events, the ones asked by the user.
	 */
	if (opts->initial_delay) {
		if (perf_evlist__add_dummy(evlist))
			return -ENOMEM;

		pos = evlist__first(evlist);
		pos->tracking = 0;
		pos = evlist__last(evlist);
		pos->tracking = 1;
		pos->core.attr.enable_on_exec = 1;
	}

	perf_evlist__config(evlist, opts, &callchain_param);

	evlist__for_each_entry(evlist, pos) {
try_again:
		if (evsel__open(pos, pos->core.cpus, pos->core.threads) < 0) {
			if (evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose > 0)
					ui__warning("%s\n", msg);
				goto try_again;
			}
			if ((errno == EINVAL || errno == EBADF) &&
			    pos->leader != pos &&
			    pos->weak_group) {
				pos = perf_evlist__reset_weak_group(evlist, pos, true);
				goto try_again;
			}
			rc = -errno;
			evsel__open_strerror(pos, &opts->target, errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}

		pos->supported = true;
	}

	if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(evlist)) {
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");
	}

	if (perf_evlist__apply_filters(evlist, &pos)) {
		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
			pos->filter, evsel__name(pos), errno,
			str_error_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	rc = record__mmap(rec);
	if (rc)
		goto out;

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct evsel *evsel,
				struct machine *machine)
{
	struct record *rec = container_of(tool, struct record, tool);

	if (rec->evlist->first_sample_time == 0)
		rec->evlist->first_sample_time = sample->time;

	rec->evlist->last_sample_time = sample->time;

	if (rec->buildid_all)
		return 0;

	rec->samples++;
	return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
}

static int process_buildids(struct record *rec)
{
	struct perf_session *session = rec->session;

	if (perf_data__size(&rec->data) == 0)
		return 0;

	/*
	 * During this process, it'll load kernel map and replace the
	 * dso->long_name to a real pathname it found. In this case
	 * we prefer the vmlinux path like
	 * /lib/modules/3.16.4/build/vmlinux
	 *
	 * rather than build-id path (in debug directory).
	 * $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
	 */
	symbol_conf.ignore_vmlinux_buildid = true;

	/*
	 * If --buildid-all is given, it marks all DSO regardless of hits,
	 * so no need to process samples. But if timestamp_boundary is enabled,
	 * it still needs to walk on all samples to get the timestamps of
	 * first/last samples.
	 */
	if (rec->buildid_all && !rec->timestamp_boundary)
		rec->tool.sample = NULL;

	return perf_session__process_events(session);
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * As for guest kernel when processing subcommand record&report,
	 * we arrange module mmap prior to guest kernel mmap and trigger
	 * a preload dso because default guest module symbols are loaded
	 * from guest kallsyms instead of /lib/modules/XXX/XXX. This
	 * method is used to avoid symbol missing when the first addr is
	 * in module instead of in guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
	 * have no _text sometimes.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

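/*
 * With --affinity=node or --affinity=cpu, migrate the tool thread onto the
 * mask that backs the mmap it is about to drain, keeping the copy local.
 */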
static void record__adjust_affinity(struct record *rec, struct mmap *map)
{
	if (rec->opts.affinity != PERF_AFFINITY_SYS &&
	    !bitmap_equal(rec->affinity_mask.bits, map->affinity_mask.bits,
			  rec->affinity_mask.nbits)) {
		bitmap_zero(rec->affinity_mask.bits, rec->affinity_mask.nbits);
		bitmap_or(rec->affinity_mask.bits, rec->affinity_mask.bits,
			  map->affinity_mask.bits, rec->affinity_mask.nbits);
		sched_setaffinity(0, MMAP_CPU_MASK_BYTES(&rec->affinity_mask),
				  (cpu_set_t *)rec->affinity_mask.bits);
		if (verbose == 2)
			mmap_cpu_mask__scnprintf(&rec->affinity_mask, "thread");
	}
}

static size_t process_comp_header(void *record, size_t increment)
{
	struct perf_record_compressed *event = record;
	size_t size = sizeof(*event);

	if (increment) {
		event->header.size += increment;
		return increment;
	}

	event->header.type = PERF_RECORD_COMPRESSED;
	event->header.size = size;

	return size;
}

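/*
 * Compress a chunk into one or more PERF_RECORD_COMPRESSED records:
 * process_comp_header() above lays down each record header, and the
 * max_record_size bound keeps every record below PERF_SAMPLE_MAX_SIZE so
 * the report side can decompress them record by record.
 */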
static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
			    void *src, size_t src_size)
{
	size_t compressed;
	size_t max_record_size = PERF_SAMPLE_MAX_SIZE - sizeof(struct perf_record_compressed) - 1;

	compressed = zstd_compress_stream_to_records(&session->zstd_data, dst, dst_size, src, src_size,
						     max_record_size, process_comp_header);

	session->bytes_transferred += src_size;
	session->bytes_compressed  += compressed;

	return compressed;
}

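/*
 * Drain every mmap of the evlist. With aio enabled the file position is
 * carried in 'off' via record__aio_get_pos()/record__aio_set_pos(), since
 * queued writes move it asynchronously; 'synch' temporarily forces
 * map->core.flush to 1 so a final pass empties the buffers completely.
 */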
static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist,
				    bool overwrite, bool synch)
{
	u64 bytes_written = rec->bytes_written;
	int i;
	int rc = 0;
	struct mmap *maps;
	int trace_fd = rec->data.file.fd;
	off_t off = 0;

	if (!evlist)
		return 0;

	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
	if (!maps)
		return 0;

	if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
		return 0;

	if (record__aio_enabled(rec))
		off = record__aio_get_pos(trace_fd);

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		u64 flush = 0;
		struct mmap *map = &maps[i];

		if (map->core.base) {
			record__adjust_affinity(rec, map);
			if (synch) {
				flush = map->core.flush;
				map->core.flush = 1;
			}
			if (!record__aio_enabled(rec)) {
				if (perf_mmap__push(map, rec, record__pushfn) < 0) {
					if (synch)
						map->core.flush = flush;
					rc = -1;
					goto out;
				}
			} else {
				if (record__aio_push(rec, map, &off) < 0) {
					record__aio_set_pos(trace_fd, off);
					if (synch)
						map->core.flush = flush;
					rc = -1;
					goto out;
				}
			}
			if (synch)
				map->core.flush = flush;
		}

		if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
		    !rec->opts.auxtrace_sample_mode &&
		    record__auxtrace_mmap_read(rec, map) != 0) {
			rc = -1;
			goto out;
		}
	}

	if (record__aio_enabled(rec))
		record__aio_set_pos(trace_fd, off);

	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, NULL, &finished_round_event, sizeof(finished_round_event));

	if (overwrite)
		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
out:
	return rc;
}

static int record__mmap_read_all(struct record *rec, bool synch)
{
	int err;

	err = record__mmap_read_evlist(rec, rec->evlist, false, synch);
	if (err)
		return err;

	return record__mmap_read_evlist(rec, rec->evlist, true, synch);
}

static void record__init_features(struct record *rec)
{
	struct perf_session *session = rec->session;
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&rec->evlist->core.entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);

	if (!rec->opts.full_auxtrace)
		perf_header__clear_feat(&session->header, HEADER_AUXTRACE);

	if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
		perf_header__clear_feat(&session->header, HEADER_CLOCKID);

	perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
	if (!record__comp_enabled(rec))
		perf_header__clear_feat(&session->header, HEADER_COMPRESSED);

	perf_header__clear_feat(&session->header, HEADER_STAT);
}

static void
record__finish_output(struct record *rec)
{
	struct perf_data *data = &rec->data;
	int fd = perf_data__fd(data);

	if (data->is_pipe)
		return;

	rec->session->header.data_size += rec->bytes_written;
	data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR);

	if (!rec->no_buildid) {
		process_buildids(rec);

		if (rec->buildid_all)
			dsos__hit_all(rec->session);
	}
	perf_session__write_header(rec->session, rec->evlist, fd, true);

	return;
}

static int record__synthesize_workload(struct record *rec, bool tail)
{
	int err;
	struct perf_thread_map *thread_map;

	if (rec->opts.tail_synthesize != tail)
		return 0;

	thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
	if (thread_map == NULL)
		return -1;

	err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
						 process_synthesized_event,
						 &rec->session->machines.host,
						 rec->opts.sample_address);
	perf_thread_map__put(thread_map);
	return err;
}

static int record__synthesize(struct record *rec, bool tail);

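/*
 * Rotate the output file. A usage sketch (option spellings as in
 * perf-record(1)):
 *
 *   perf record --switch-output=signal ...                    # on SIGUSR2
 *   perf record --switch-output=10M --switch-max-files=5 ...  # keep last 5
 */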
Wang Nanecfd7a92016-04-13 08:21:07 +00001190static int
1191record__switch_output(struct record *rec, bool at_exit)
1192{
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001193 struct perf_data *data = &rec->data;
Wang Nanecfd7a92016-04-13 08:21:07 +00001194 int fd, err;
Andi Kleen03724b22019-03-14 15:49:55 -07001195 char *new_filename;
Wang Nanecfd7a92016-04-13 08:21:07 +00001196
1197 /* Same Size: "2015122520103046"*/
1198 char timestamp[] = "InvalidTimestamp";
1199
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001200 record__aio_mmap_read_sync(rec);
1201
Wang Nan4ea648a2016-07-14 08:34:47 +00001202 record__synthesize(rec, true);
1203 if (target__none(&rec->opts.target))
1204 record__synthesize_workload(rec, true);
1205
Wang Nanecfd7a92016-04-13 08:21:07 +00001206 rec->samples = 0;
1207 record__finish_output(rec);
1208 err = fetch_current_timestamp(timestamp, sizeof(timestamp));
1209 if (err) {
1210 pr_err("Failed to get current timestamp\n");
1211 return -EINVAL;
1212 }
1213
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001214 fd = perf_data__switch(data, timestamp,
Wang Nanecfd7a92016-04-13 08:21:07 +00001215 rec->session->header.data_offset,
Andi Kleen03724b22019-03-14 15:49:55 -07001216 at_exit, &new_filename);
Wang Nanecfd7a92016-04-13 08:21:07 +00001217 if (fd >= 0 && !at_exit) {
1218 rec->bytes_written = 0;
1219 rec->session->header.data_size = 0;
1220 }
1221
1222 if (!quiet)
1223 fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
Jiri Olsa2d4f2792019-02-21 10:41:30 +01001224 data->path, timestamp);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001225
Andi Kleen03724b22019-03-14 15:49:55 -07001226 if (rec->switch_output.num_files) {
1227 int n = rec->switch_output.cur_file + 1;
1228
1229 if (n >= rec->switch_output.num_files)
1230 n = 0;
1231 rec->switch_output.cur_file = n;
1232 if (rec->switch_output.filenames[n]) {
1233 remove(rec->switch_output.filenames[n]);
Arnaldo Carvalho de Melod8f9da22019-07-04 12:06:20 -03001234 zfree(&rec->switch_output.filenames[n]);
Andi Kleen03724b22019-03-14 15:49:55 -07001235 }
1236 rec->switch_output.filenames[n] = new_filename;
1237 } else {
1238 free(new_filename);
1239 }
1240
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001241 /* Output tracking events */
Wang Nanbe7b0c92016-04-20 18:59:54 +00001242 if (!at_exit) {
Wang Nan4ea648a2016-07-14 08:34:47 +00001243 record__synthesize(rec, false);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001244
Wang Nanbe7b0c92016-04-20 18:59:54 +00001245 /*
1246 * In 'perf record --switch-output' without -a,
1247 * record__synthesize() in record__switch_output() won't
1248 * generate tracking events because there's no thread_map
1249 * in evlist. Which causes newly created perf.data doesn't
1250 * contain map and comm information.
1251 * Create a fake thread_map and directly call
1252 * perf_event__synthesize_thread_map() for those events.
1253 */
1254 if (target__none(&rec->opts.target))
Wang Nan4ea648a2016-07-14 08:34:47 +00001255 record__synthesize_workload(rec, false);
Wang Nanbe7b0c92016-04-20 18:59:54 +00001256 }
Wang Nanecfd7a92016-04-13 08:21:07 +00001257 return fd;
1258}
1259
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001260static volatile int workload_exec_errno;
1261
1262/*
1263 * perf_evlist__prepare_workload will send a SIGUSR1
1264 * if the fork fails, since we asked by setting its
1265 * want_signal to true.
1266 */
Namhyung Kim45604712014-05-12 09:47:24 +09001267static void workload_exec_failed_signal(int signo __maybe_unused,
1268 siginfo_t *info,
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001269 void *ucontext __maybe_unused)
1270{
1271 workload_exec_errno = info->si_value.sival_int;
1272 done = 1;
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001273 child_finished = 1;
1274}
1275
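/*
 * The sending side lives in perf_evlist__prepare_workload(); on a failed
 * exec the child queues its errno for the handler above to read back from
 * si_value. Roughly (a sketch, not a verbatim copy):
 *
 *	union sigval val = { .sival_int = errno };
 *	sigqueue(getppid(), SIGUSR1, val);
 */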
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001276static void snapshot_sig_handler(int sig);
Jiri Olsabfacbe32017-01-09 10:52:00 +01001277static void alarm_sig_handler(int sig);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001278
Wang Nanee667f92016-06-27 10:24:05 +00001279static const struct perf_event_mmap_page *
Jiri Olsa63503db2019-07-21 13:23:52 +02001280perf_evlist__pick_pc(struct evlist *evlist)
Wang Nanee667f92016-06-27 10:24:05 +00001281{
Wang Nanb2cb6152016-07-14 08:34:39 +00001282 if (evlist) {
Jiri Olsa547740f2019-07-27 22:07:44 +02001283 if (evlist->mmap && evlist->mmap[0].core.base)
1284 return evlist->mmap[0].core.base;
1285 if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].core.base)
1286 return evlist->overwrite_mmap[0].core.base;
Wang Nanb2cb6152016-07-14 08:34:39 +00001287 }
Wang Nanee667f92016-06-27 10:24:05 +00001288 return NULL;
1289}
1290
Wang Nanc45628b2016-05-24 02:28:59 +00001291static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
1292{
Wang Nanee667f92016-06-27 10:24:05 +00001293 const struct perf_event_mmap_page *pc;
1294
1295 pc = perf_evlist__pick_pc(rec->evlist);
1296 if (pc)
1297 return pc;
Wang Nanc45628b2016-05-24 02:28:59 +00001298 return NULL;
1299}
1300
Wang Nan4ea648a2016-07-14 08:34:47 +00001301static int record__synthesize(struct record *rec, bool tail)
Wang Nanc45c86e2016-02-26 09:32:07 +00001302{
1303 struct perf_session *session = rec->session;
1304 struct machine *machine = &session->machines.host;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001305 struct perf_data *data = &rec->data;
Wang Nanc45c86e2016-02-26 09:32:07 +00001306 struct record_opts *opts = &rec->opts;
1307 struct perf_tool *tool = &rec->tool;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001308 int fd = perf_data__fd(data);
Wang Nanc45c86e2016-02-26 09:32:07 +00001309 int err = 0;
Stephane Eraniand99c22e2020-04-22 08:50:38 -07001310 event_op f = process_synthesized_event;
Wang Nanc45c86e2016-02-26 09:32:07 +00001311
Wang Nan4ea648a2016-07-14 08:34:47 +00001312 if (rec->opts.tail_synthesize != tail)
1313 return 0;
1314
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001315 if (data->is_pipe) {
Jiri Olsaa2015512018-03-14 10:22:04 +01001316 /*
1317 * We need to synthesize events first, because some
 1318		 * features work on top of them (on the report side).
1319 */
Jiri Olsa318ec182018-08-30 08:32:15 +02001320 err = perf_event__synthesize_attrs(tool, rec->evlist,
Wang Nanc45c86e2016-02-26 09:32:07 +00001321 process_synthesized_event);
1322 if (err < 0) {
1323 pr_err("Couldn't synthesize attrs.\n");
1324 goto out;
1325 }
1326
Jiri Olsaa2015512018-03-14 10:22:04 +01001327 err = perf_event__synthesize_features(tool, session, rec->evlist,
1328 process_synthesized_event);
1329 if (err < 0) {
1330 pr_err("Couldn't synthesize features.\n");
1331 return err;
1332 }
1333
Jiri Olsace9036a2019-07-21 13:24:23 +02001334 if (have_tracepoints(&rec->evlist->core.entries)) {
Wang Nanc45c86e2016-02-26 09:32:07 +00001335 /*
1336 * FIXME err <= 0 here actually means that
 1337			 * there were no tracepoints, so it's not really
 1338			 * an error, just that we don't need to
 1339			 * synthesize anything. We really have to
 1340			 * return this more properly and also
 1341			 * propagate the errors that currently end up calling die()
1342 */
1343 err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
1344 process_synthesized_event);
1345 if (err <= 0) {
1346 pr_err("Couldn't record tracing data.\n");
1347 goto out;
1348 }
1349 rec->bytes_written += err;
1350 }
1351 }
1352
Wang Nanc45628b2016-05-24 02:28:59 +00001353 err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
Adrian Hunter46bc29b2016-03-08 10:38:44 +02001354 process_synthesized_event, machine);
1355 if (err)
1356 goto out;
1357
Adrian Hunterc0a6de02019-11-15 14:42:16 +02001358 /* Synthesize id_index before auxtrace_info */
1359 if (rec->opts.auxtrace_sample_mode) {
1360 err = perf_event__synthesize_id_index(tool,
1361 process_synthesized_event,
1362 session->evlist, machine);
1363 if (err)
1364 goto out;
1365 }
1366
Wang Nanc45c86e2016-02-26 09:32:07 +00001367 if (rec->opts.full_auxtrace) {
1368 err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
1369 session, process_synthesized_event);
1370 if (err)
1371 goto out;
1372 }
1373
Arnaldo Carvalho de Melo6c443952017-11-14 11:03:19 -03001374 if (!perf_evlist__exclude_kernel(rec->evlist)) {
1375 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
1376 machine);
1377 WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
1378 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
1379 "Check /proc/kallsyms permission or run as root.\n");
Wang Nanc45c86e2016-02-26 09:32:07 +00001380
Arnaldo Carvalho de Melo6c443952017-11-14 11:03:19 -03001381 err = perf_event__synthesize_modules(tool, process_synthesized_event,
1382 machine);
1383 WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
1384 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
1385 "Check /proc/modules permission or run as root.\n");
1386 }
Wang Nanc45c86e2016-02-26 09:32:07 +00001387
1388 if (perf_guest) {
1389 machines__process_guests(&session->machines,
1390 perf_event__synthesize_guest_os, tool);
1391 }
1392
Andi Kleenbfd8f722017-11-17 13:42:58 -08001393 err = perf_event__synthesize_extra_attr(&rec->tool,
1394 rec->evlist,
1395 process_synthesized_event,
1396 data->is_pipe);
1397 if (err)
1398 goto out;
1399
Jiri Olsa03617c22019-07-21 13:24:42 +02001400 err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->core.threads,
Andi Kleen373565d2017-11-17 13:42:59 -08001401 process_synthesized_event,
1402 NULL);
1403 if (err < 0) {
1404 pr_err("Couldn't synthesize thread map.\n");
1405 return err;
1406 }
1407
Jiri Olsaf72f9012019-07-21 13:24:41 +02001408 err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.cpus,
Andi Kleen373565d2017-11-17 13:42:59 -08001409 process_synthesized_event, NULL);
1410 if (err < 0) {
1411 pr_err("Couldn't synthesize cpu map.\n");
1412 return err;
1413 }
1414
Song Liue5416952019-03-11 22:30:41 -07001415 err = perf_event__synthesize_bpf_events(session, process_synthesized_event,
Song Liu7b612e22019-01-17 08:15:19 -08001416 machine, opts);
1417 if (err < 0)
1418 pr_warning("Couldn't synthesize bpf events.\n");
1419
Namhyung Kimab640692020-03-25 21:45:33 +09001420 err = perf_event__synthesize_cgroups(tool, process_synthesized_event,
1421 machine);
1422 if (err < 0)
1423 pr_warning("Couldn't synthesize cgroup events.\n");
1424
Stephane Eraniand99c22e2020-04-22 08:50:38 -07001425 if (rec->opts.nr_threads_synthesize > 1) {
1426 perf_set_multithreaded();
1427 f = process_locked_synthesized_event;
1428 }
1429
Jiri Olsa03617c22019-07-21 13:24:42 +02001430 err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->core.threads,
Stephane Eraniand99c22e2020-04-22 08:50:38 -07001431 f, opts->sample_address,
1432 rec->opts.nr_threads_synthesize);
1433
1434 if (rec->opts.nr_threads_synthesize > 1)
1435 perf_set_singlethreaded();
1436
Wang Nanc45c86e2016-02-26 09:32:07 +00001437out:
1438 return err;
1439}
1440
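/*
 * Most of the cost above for already-running workloads is
 * __machine__synthesize_threads() walking /proc; on systems with many tasks
 * it can be parallelized, e.g. (option defined in record_options below):
 *
 *	perf record --num-thread-synthesize=4 -a -- sleep 1
 */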
Arnaldo Carvalho de Melo899e5ff2020-04-27 17:56:37 -03001441static int record__process_signal_event(union perf_event *event __maybe_unused, void *data)
1442{
1443 struct record *rec = data;
1444 pthread_kill(rec->thread_id, SIGUSR2);
1445 return 0;
1446}
1447
Arnaldo Carvalho de Melo23cbb412020-04-28 14:58:29 -03001448static int record__setup_sb_evlist(struct record *rec)
1449{
1450 struct record_opts *opts = &rec->opts;
1451
1452 if (rec->sb_evlist != NULL) {
1453 /*
1454 * We get here if --switch-output-event populated the
1455 * sb_evlist, so associate a callback that will send a SIGUSR2
1456 * to the main thread.
1457 */
1458 evlist__set_cb(rec->sb_evlist, record__process_signal_event, rec);
1459 rec->thread_id = pthread_self();
1460 }
1461
1462 if (!opts->no_bpf_event) {
1463 if (rec->sb_evlist == NULL) {
1464 rec->sb_evlist = evlist__new();
1465
1466 if (rec->sb_evlist == NULL) {
1467 pr_err("Couldn't create side band evlist.\n.");
1468 return -1;
1469 }
1470 }
1471
1472 if (evlist__add_bpf_sb_event(rec->sb_evlist, &rec->session->header.env)) {
1473 pr_err("Couldn't ask for PERF_RECORD_BPF_EVENT side band events.\n.");
1474 return -1;
1475 }
1476 }
1477
1478 if (perf_evlist__start_sb_thread(rec->sb_evlist, &rec->opts.target)) {
1479 pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
1480 opts->no_bpf_event = true;
1481 }
1482
1483 return 0;
1484}
1485
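/*
 * A sketch of the side band rotation this sets up (assuming the tracepoint
 * is available on the running kernel):
 *
 *	perf record --switch-output-event=syscalls:sys_enter_exit -a
 *
 * When the event arrives on the side band, record__process_signal_event()
 * above sends SIGUSR2 to the main thread, which then rotates the output in
 * record__switch_output().
 */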
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001486static int __cmd_record(struct record *rec, int argc, const char **argv)
Peter Zijlstra16c8a102009-05-05 17:50:27 +02001487{
David Ahern57706ab2013-11-06 11:41:34 -07001488 int err;
Namhyung Kim45604712014-05-12 09:47:24 +09001489 int status = 0;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001490 unsigned long waking = 0;
Zhang, Yanmin46be6042010-03-18 11:36:04 -03001491 const bool forks = argc > 0;
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02001492 struct perf_tool *tool = &rec->tool;
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03001493 struct record_opts *opts = &rec->opts;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001494 struct perf_data *data = &rec->data;
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001495 struct perf_session *session;
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001496 bool disabled = false, draining = false;
Namhyung Kim42aa2762015-01-29 17:06:48 +09001497 int fd;
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001498 float ratio = 0;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001499
Namhyung Kim45604712014-05-12 09:47:24 +09001500 atexit(record__sig_exit);
Peter Zijlstraf5970552009-06-18 23:22:55 +02001501 signal(SIGCHLD, sig_handler);
1502 signal(SIGINT, sig_handler);
David Ahern804f7ac2013-05-06 12:24:23 -06001503 signal(SIGTERM, sig_handler);
Wang Nana0748652016-11-26 07:03:28 +00001504 signal(SIGSEGV, sigsegv_handler);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00001505
Hari Bathinif3b36142017-03-08 02:11:43 +05301506 if (rec->opts.record_namespaces)
1507 tool->namespace_events = true;
1508
Namhyung Kim8fb4b672020-03-25 21:45:34 +09001509 if (rec->opts.record_cgroup) {
1510#ifdef HAVE_FILE_HANDLE
1511 tool->cgroup_events = true;
1512#else
1513 pr_err("cgroup tracking is not supported\n");
1514 return -1;
1515#endif
1516 }
1517
Jiri Olsadc0c6122017-01-09 10:51:58 +01001518 if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001519 signal(SIGUSR2, snapshot_sig_handler);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001520 if (rec->opts.auxtrace_snapshot_mode)
1521 trigger_on(&auxtrace_snapshot_trigger);
Jiri Olsadc0c6122017-01-09 10:51:58 +01001522 if (rec->switch_output.enabled)
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001523 trigger_on(&switch_output_trigger);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00001524 } else {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001525 signal(SIGUSR2, SIG_IGN);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00001526 }
Peter Zijlstraf5970552009-06-18 23:22:55 +02001527
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001528 session = perf_session__new(data, false, tool);
Mamatha Inamdar6ef81c52019-08-22 12:50:49 +05301529 if (IS_ERR(session)) {
Adrien BAKffa91882014-04-18 11:00:43 +09001530 pr_err("Perf session creation failed.\n");
Mamatha Inamdar6ef81c52019-08-22 12:50:49 +05301531 return PTR_ERR(session);
Arnaldo Carvalho de Meloa9a70bb2009-11-17 01:18:11 -02001532 }
1533
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001534 fd = perf_data__fd(data);
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001535 rec->session = session;
1536
Alexey Budankov5d7f4112019-03-18 20:43:35 +03001537 if (zstd_init(&session->zstd_data, rec->opts.comp_level) < 0) {
1538 pr_err("Compression initialization failed.\n");
1539 return -1;
1540 }
1541
1542 session->header.env.comp_type = PERF_COMP_ZSTD;
1543 session->header.env.comp_level = rec->opts.comp_level;
1544
Adrian Huntereeb399b2019-10-04 11:31:21 +03001545 if (rec->opts.kcore &&
1546 !record__kcore_readable(&session->machines.host)) {
1547 pr_err("ERROR: kcore is not readable.\n");
1548 return -1;
1549 }
1550
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001551 record__init_features(rec);
Stephane Eranian330aa672012-03-08 23:47:46 +01001552
Alexey Budankovcf790512018-10-09 17:36:24 +03001553 if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
1554 session->header.env.clockid_res_ns = rec->opts.clockid_res_ns;
1555
Arnaldo Carvalho de Melod4db3f12009-12-27 21:36:57 -02001556 if (forks) {
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001557 err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001558 argv, data->is_pipe,
Arnaldo Carvalho de Melo735f7e02014-01-03 14:56:49 -03001559 workload_exec_failed_signal);
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001560 if (err < 0) {
1561 pr_err("Couldn't run the workload!\n");
Namhyung Kim45604712014-05-12 09:47:24 +09001562 status = err;
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001563 goto out_delete_session;
Jens Axboe0a5ac842009-08-12 11:18:01 +02001564 }
Peter Zijlstra856e9662009-12-16 17:55:55 +01001565 }
1566
Jiri Olsaad46e48c2018-03-02 17:13:54 +01001567 /*
1568 * If we have just single event and are sending data
1569 * through pipe, we need to force the ids allocation,
1570 * because we synthesize event name through the pipe
1571 * and need the id for that.
1572 */
Jiri Olsa6484d2f2019-07-21 13:24:28 +02001573 if (data->is_pipe && rec->evlist->core.nr_entries == 1)
Jiri Olsaad46e48c2018-03-02 17:13:54 +01001574 rec->opts.sample_id = true;
1575
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001576 if (record__open(rec) != 0) {
David Ahern8d3eca22012-08-26 12:24:47 -06001577 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001578 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001579 }
Jiri Olsaf6fa4372019-08-06 15:14:05 +02001580 session->header.env.comp_mmap_len = session->evlist->core.mmap_len;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001581
Adrian Huntereeb399b2019-10-04 11:31:21 +03001582 if (rec->opts.kcore) {
1583 err = record__kcore_copy(&session->machines.host, data);
1584 if (err) {
1585 pr_err("ERROR: Failed to copy kcore\n");
1586 goto out_child;
1587 }
1588 }
1589
Wang Nan8690a2a2016-02-22 09:10:32 +00001590 err = bpf__apply_obj_config();
1591 if (err) {
1592 char errbuf[BUFSIZ];
1593
1594 bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
1595 pr_err("ERROR: Apply config to BPF failed: %s\n",
1596 errbuf);
1597 goto out_child;
1598 }
1599
Adrian Huntercca84822015-08-19 17:29:21 +03001600 /*
1601 * Normally perf_session__new would do this, but it doesn't have the
1602 * evlist.
1603 */
1604 if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
1605 pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
1606 rec->tool.ordered_events = false;
1607 }
1608
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001609 if (!rec->evlist->nr_groups)
Namhyung Kima8bb5592013-01-22 18:09:31 +09001610 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
1611
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001612 if (data->is_pipe) {
Namhyung Kim42aa2762015-01-29 17:06:48 +09001613 err = perf_header__write_pipe(fd);
Tom Zanussi529870e2010-04-01 23:59:16 -05001614 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001615 goto out_child;
Jiri Olsa563aecb2013-06-05 13:35:06 +02001616 } else {
Namhyung Kim42aa2762015-01-29 17:06:48 +09001617 err = perf_session__write_header(session, rec->evlist, fd, false);
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02001618 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001619 goto out_child;
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02001620 }
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02001621
Arnaldo Carvalho de Melob38d85e2020-04-24 12:24:51 -03001622 err = -1;
David Ahernd3665492012-02-06 15:27:52 -07001623 if (!rec->no_buildid
Robert Richtere20960c2011-12-07 10:02:55 +01001624 && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
David Ahernd3665492012-02-06 15:27:52 -07001625 pr_err("Couldn't generate buildids. "
Robert Richtere20960c2011-12-07 10:02:55 +01001626 "Use --no-buildid to profile anyway.\n");
Namhyung Kim45604712014-05-12 09:47:24 +09001627 goto out_child;
Robert Richtere20960c2011-12-07 10:02:55 +01001628 }
1629
Arnaldo Carvalho de Melo23cbb412020-04-28 14:58:29 -03001630 err = record__setup_sb_evlist(rec);
1631 if (err)
1632 goto out_child;
Song Liu657ee552019-03-11 22:30:50 -07001633
Wang Nan4ea648a2016-07-14 08:34:47 +00001634 err = record__synthesize(rec, false);
Wang Nanc45c86e2016-02-26 09:32:07 +00001635 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001636 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001637
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001638 if (rec->realtime_prio) {
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001639 struct sched_param param;
1640
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001641 param.sched_priority = rec->realtime_prio;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001642 if (sched_setscheduler(0, SCHED_FIFO, &param)) {
Arnaldo Carvalho de Melo6beba7a2009-10-21 17:34:06 -02001643 pr_err("Could not set realtime priority.\n");
David Ahern8d3eca22012-08-26 12:24:47 -06001644 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001645 goto out_child;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001646 }
1647 }
1648
Jiri Olsa774cb492012-11-12 18:34:01 +01001649 /*
1650 * When perf is starting the traced process, all the events
1651 * (apart from group members) have enable_on_exec=1 set,
1652 * so don't spoil it by prematurely enabling them.
1653 */
Andi Kleen6619a532014-01-11 13:38:27 -08001654 if (!target__none(&opts->target) && !opts->initial_delay)
Jiri Olsa1c87f162019-07-21 13:24:08 +02001655 evlist__enable(rec->evlist);
David Ahern764e16a32011-08-25 10:17:55 -06001656
Peter Zijlstra856e9662009-12-16 17:55:55 +01001657 /*
1658 * Let the child rip
1659 */
Namhyung Kime803cf92015-09-22 09:24:55 +09001660 if (forks) {
Jiri Olsa20a8a3c2018-03-07 16:50:04 +01001661 struct machine *machine = &session->machines.host;
Namhyung Kime5bed562015-09-30 10:45:24 +09001662 union perf_event *event;
Hari Bathinie907caf2017-03-08 02:11:51 +05301663 pid_t tgid;
Namhyung Kime5bed562015-09-30 10:45:24 +09001664
1665 event = malloc(sizeof(event->comm) + machine->id_hdr_size);
1666 if (event == NULL) {
1667 err = -ENOMEM;
1668 goto out_child;
1669 }
1670
Namhyung Kime803cf92015-09-22 09:24:55 +09001671 /*
 1672		 * Some H/W events are generated before the COMM event,
 1673		 * which is emitted during exec(), so perf script
 1674		 * cannot see a correct process name for those events.
 1675		 * Synthesize a COMM event to prevent that.
1676 */
Hari Bathinie907caf2017-03-08 02:11:51 +05301677 tgid = perf_event__synthesize_comm(tool, event,
1678 rec->evlist->workload.pid,
1679 process_synthesized_event,
1680 machine);
1681 free(event);
1682
1683 if (tgid == -1)
1684 goto out_child;
1685
1686 event = malloc(sizeof(event->namespaces) +
1687 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
1688 machine->id_hdr_size);
1689 if (event == NULL) {
1690 err = -ENOMEM;
1691 goto out_child;
1692 }
1693
1694 /*
1695 * Synthesize NAMESPACES event for the command specified.
1696 */
1697 perf_event__synthesize_namespaces(tool, event,
1698 rec->evlist->workload.pid,
1699 tgid, process_synthesized_event,
1700 machine);
Namhyung Kime5bed562015-09-30 10:45:24 +09001701 free(event);
Namhyung Kime803cf92015-09-22 09:24:55 +09001702
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001703 perf_evlist__start_workload(rec->evlist);
Namhyung Kime803cf92015-09-22 09:24:55 +09001704 }
Peter Zijlstra856e9662009-12-16 17:55:55 +01001705
Andi Kleen6619a532014-01-11 13:38:27 -08001706 if (opts->initial_delay) {
Arnaldo Carvalho de Melo0693e682016-08-08 15:05:46 -03001707 usleep(opts->initial_delay * USEC_PER_MSEC);
Jiri Olsa1c87f162019-07-21 13:24:08 +02001708 evlist__enable(rec->evlist);
Andi Kleen6619a532014-01-11 13:38:27 -08001709 }
1710
Wang Nan5f9cf592016-04-20 18:59:49 +00001711 trigger_ready(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001712 trigger_ready(&switch_output_trigger);
Wang Nana0748652016-11-26 07:03:28 +00001713 perf_hooks__invoke_record_start();
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001714 for (;;) {
Yang Shi9f065192015-09-29 14:49:43 -07001715 unsigned long long hits = rec->samples;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001716
Wang Nan057374642016-07-14 08:34:43 +00001717 /*
 1718		 * rec->evlist->bkw_mmap_state may be
 1719		 * BKW_MMAP_EMPTY here: when done == true and
 1720		 * hits != rec->samples in the previous round.
 1721		 *
 1722		 * perf_evlist__toggle_bkw_mmap() ensures we never
1723 * convert BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
1724 */
1725 if (trigger_is_hit(&switch_output_trigger) || done || draining)
1726 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);
1727
Alexey Budankov470530b2019-03-18 20:40:26 +03001728 if (record__mmap_read_all(rec, false) < 0) {
Wang Nan5f9cf592016-04-20 18:59:49 +00001729 trigger_error(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001730 trigger_error(&switch_output_trigger);
David Ahern8d3eca22012-08-26 12:24:47 -06001731 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001732 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001733 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001734
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001735 if (auxtrace_record__snapshot_started) {
1736 auxtrace_record__snapshot_started = 0;
Wang Nan5f9cf592016-04-20 18:59:49 +00001737 if (!trigger_is_error(&auxtrace_snapshot_trigger))
Alexander Shishkince7b0e42019-08-06 17:41:01 +03001738 record__read_auxtrace_snapshot(rec, false);
Wang Nan5f9cf592016-04-20 18:59:49 +00001739 if (trigger_is_error(&auxtrace_snapshot_trigger)) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001740 pr_err("AUX area tracing snapshot failed\n");
1741 err = -1;
1742 goto out_child;
1743 }
1744 }
1745
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001746 if (trigger_is_hit(&switch_output_trigger)) {
Wang Nan057374642016-07-14 08:34:43 +00001747 /*
 1748			 * If switch_output_trigger is hit, the data in the
 1749			 * overwritable ring buffer should have been collected,
 1750			 * so bkw_mmap_state should be set to BKW_MMAP_EMPTY.
 1751			 *
 1752			 * If SIGUSR2 was raised after or during record__mmap_read_all(),
 1753			 * record__mmap_read_all() didn't collect data from the
 1754			 * overwritable ring buffer. Read again.
1755 */
1756 if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
1757 continue;
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001758 trigger_ready(&switch_output_trigger);
1759
Wang Nan057374642016-07-14 08:34:43 +00001760 /*
1761 * Reenable events in overwrite ring buffer after
1762 * record__mmap_read_all(): we should have collected
1763 * data from it.
1764 */
1765 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);
1766
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001767 if (!quiet)
1768 fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
1769 waking);
1770 waking = 0;
1771 fd = record__switch_output(rec, false);
1772 if (fd < 0) {
1773 pr_err("Failed to switch to new file\n");
1774 trigger_error(&switch_output_trigger);
1775 err = fd;
1776 goto out_child;
1777 }
Jiri Olsabfacbe32017-01-09 10:52:00 +01001778
1779 /* re-arm the alarm */
1780 if (rec->switch_output.time)
1781 alarm(rec->switch_output.time);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001782 }
1783
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001784 if (hits == rec->samples) {
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001785 if (done || draining)
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001786 break;
Jiri Olsa80ab2982019-08-31 22:48:33 +02001787 err = evlist__poll(rec->evlist, -1);
Jiri Olsaa5151142014-06-02 13:44:23 -04001788 /*
 1789			 * Propagate the error only if there is one. Ignore a positive
 1790			 * number of returned events and interrupt errors (EINTR).
1791 */
1792 if (err > 0 || (err < 0 && errno == EINTR))
Namhyung Kim45604712014-05-12 09:47:24 +09001793 err = 0;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001794 waking++;
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001795
Jiri Olsaf4009e72019-08-16 16:00:45 +02001796 if (evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001797 draining = true;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001798 }
1799
Jiri Olsa774cb492012-11-12 18:34:01 +01001800 /*
 1801		 * When perf is starting the traced process, the events die
 1802		 * with the process at the end and we wait for that. Thus there
 1803		 * is no need to disable the events in this case.
1804 */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001805 if (done && !disabled && !target__none(&opts->target)) {
Wang Nan5f9cf592016-04-20 18:59:49 +00001806 trigger_off(&auxtrace_snapshot_trigger);
Jiri Olsae74676d2019-07-21 13:24:09 +02001807 evlist__disable(rec->evlist);
Jiri Olsa27119262012-11-12 18:34:02 +01001808 disabled = true;
1809 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001810 }
Alexander Shishkince7b0e42019-08-06 17:41:01 +03001811
Wang Nan5f9cf592016-04-20 18:59:49 +00001812 trigger_off(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001813 trigger_off(&switch_output_trigger);
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001814
Alexander Shishkince7b0e42019-08-06 17:41:01 +03001815 if (opts->auxtrace_snapshot_on_exit)
1816 record__auxtrace_snapshot_exit(rec);
1817
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001818 if (forks && workload_exec_errno) {
Masami Hiramatsu35550da2014-08-14 02:22:43 +00001819 char msg[STRERR_BUFSIZE];
Arnaldo Carvalho de Meloc8b5f2c2016-07-06 11:56:20 -03001820 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001821 pr_err("Workload failed: %s\n", emsg);
1822 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001823 goto out_child;
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001824 }
1825
Namhyung Kime3d59112015-01-29 17:06:44 +09001826 if (!quiet)
Namhyung Kim45604712014-05-12 09:47:24 +09001827 fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02001828
Wang Nan4ea648a2016-07-14 08:34:47 +00001829 if (target__none(&rec->opts.target))
1830 record__synthesize_workload(rec, true);
1831
Namhyung Kim45604712014-05-12 09:47:24 +09001832out_child:
Alexey Budankov470530b2019-03-18 20:40:26 +03001833 record__mmap_read_all(rec, true);
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001834 record__aio_mmap_read_sync(rec);
1835
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001836 if (rec->session->bytes_transferred && rec->session->bytes_compressed) {
1837 ratio = (float)rec->session->bytes_transferred/(float)rec->session->bytes_compressed;
1838 session->header.env.comp_ratio = ratio + 0.5;
1839 }
1840
Namhyung Kim45604712014-05-12 09:47:24 +09001841 if (forks) {
1842 int exit_status;
Ingo Molnaraddc2782009-06-02 23:43:11 +02001843
Namhyung Kim45604712014-05-12 09:47:24 +09001844 if (!child_finished)
1845 kill(rec->evlist->workload.pid, SIGTERM);
1846
1847 wait(&exit_status);
1848
1849 if (err < 0)
1850 status = err;
1851 else if (WIFEXITED(exit_status))
1852 status = WEXITSTATUS(exit_status);
1853 else if (WIFSIGNALED(exit_status))
1854 signr = WTERMSIG(exit_status);
1855 } else
1856 status = err;
1857
Wang Nan4ea648a2016-07-14 08:34:47 +00001858 record__synthesize(rec, true);
Namhyung Kime3d59112015-01-29 17:06:44 +09001859 /* this will be recalculated during process_buildids() */
1860 rec->samples = 0;
1861
Wang Nanecfd7a92016-04-13 08:21:07 +00001862 if (!err) {
1863 if (!rec->timestamp_filename) {
1864 record__finish_output(rec);
1865 } else {
1866 fd = record__switch_output(rec, true);
1867 if (fd < 0) {
1868 status = fd;
1869 goto out_delete_session;
1870 }
1871 }
1872 }
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001873
Wang Nana0748652016-11-26 07:03:28 +00001874 perf_hooks__invoke_record_end();
1875
Namhyung Kime3d59112015-01-29 17:06:44 +09001876 if (!err && !quiet) {
1877 char samples[128];
Wang Nanecfd7a92016-04-13 08:21:07 +00001878 const char *postfix = rec->timestamp_filename ?
1879 ".<timestamp>" : "";
Namhyung Kime3d59112015-01-29 17:06:44 +09001880
Adrian Hunteref149c22015-04-09 18:53:45 +03001881 if (rec->samples && !rec->opts.full_auxtrace)
Namhyung Kime3d59112015-01-29 17:06:44 +09001882 scnprintf(samples, sizeof(samples),
1883 " (%" PRIu64 " samples)", rec->samples);
1884 else
1885 samples[0] = '\0';
1886
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001887 fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s",
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001888 perf_data__size(data) / 1024.0 / 1024.0,
Jiri Olsa2d4f2792019-02-21 10:41:30 +01001889 data->path, postfix, samples);
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001890 if (ratio) {
1891 fprintf(stderr, ", compressed (original %.3f MB, ratio is %.3f)",
1892 rec->session->bytes_transferred / 1024.0 / 1024.0,
1893 ratio);
1894 }
1895 fprintf(stderr, " ]\n");
Namhyung Kime3d59112015-01-29 17:06:44 +09001896 }
1897
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001898out_delete_session:
Alexey Budankov5d7f4112019-03-18 20:43:35 +03001899 zstd_fini(&session->zstd_data);
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001900 perf_session__delete(session);
Song Liu657ee552019-03-11 22:30:50 -07001901
1902 if (!opts->no_bpf_event)
Arnaldo Carvalho de Melobc477d792020-04-24 10:24:04 -03001903 perf_evlist__stop_sb_thread(rec->sb_evlist);
Namhyung Kim45604712014-05-12 09:47:24 +09001904 return status;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001905}
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001906
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001907static void callchain_debug(struct callchain_param *callchain)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001908{
Kan Liangaad2b212015-01-05 13:23:04 -05001909 static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
Jiri Olsaa601fdf2014-02-03 12:44:43 +01001910
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001911 pr_debug("callchain: type %s\n", str[callchain->record_mode]);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001912
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001913 if (callchain->record_mode == CALLCHAIN_DWARF)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001914 pr_debug("callchain: stack dump size %d\n",
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001915 callchain->dump_size);
1916}
1917
1918int record_opts__parse_callchain(struct record_opts *record,
1919 struct callchain_param *callchain,
1920 const char *arg, bool unset)
1921{
1922 int ret;
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001923 callchain->enabled = !unset;
1924
1925 /* --no-call-graph */
1926 if (unset) {
1927 callchain->record_mode = CALLCHAIN_NONE;
1928 pr_debug("callchain: disabled\n");
1929 return 0;
1930 }
1931
1932 ret = parse_callchain_record_opt(arg, callchain);
1933 if (!ret) {
1934 /* Enable data address sampling for DWARF unwind. */
1935 if (callchain->record_mode == CALLCHAIN_DWARF)
1936 record->sample_address = true;
1937 callchain_debug(callchain);
1938 }
1939
1940 return ret;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001941}
1942
Kan Liangc421e802015-07-29 05:42:12 -04001943int record_parse_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001944 const char *arg,
1945 int unset)
1946{
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001947 return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
Jiri Olsa26d33022012-08-07 15:20:47 +02001948}
1949
Kan Liangc421e802015-07-29 05:42:12 -04001950int record_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001951 const char *arg __maybe_unused,
1952 int unset __maybe_unused)
1953{
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001954 struct callchain_param *callchain = opt->value;
Kan Liangc421e802015-07-29 05:42:12 -04001955
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001956 callchain->enabled = true;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001957
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001958 if (callchain->record_mode == CALLCHAIN_NONE)
1959 callchain->record_mode = CALLCHAIN_FP;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001960
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001961 callchain_debug(callchain);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001962 return 0;
1963}
1964
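/*
 * For example, plain '-g' records frame pointer callchains (CALLCHAIN_FP by
 * default), while '--call-graph dwarf,4096' selects DWARF unwinding with a
 * 4096 byte user stack dump and, per record_opts__parse_callchain() above,
 * also enables sample_address.
 */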
Jiri Olsaeb853e82014-02-03 12:44:42 +01001965static int perf_record_config(const char *var, const char *value, void *cb)
1966{
Namhyung Kim7a29c082015-12-15 10:49:56 +09001967 struct record *rec = cb;
1968
1969 if (!strcmp(var, "record.build-id")) {
1970 if (!strcmp(value, "cache"))
1971 rec->no_buildid_cache = false;
1972 else if (!strcmp(value, "no-cache"))
1973 rec->no_buildid_cache = true;
1974 else if (!strcmp(value, "skip"))
1975 rec->no_buildid = true;
1976 else
1977 return -1;
1978 return 0;
1979 }
Yisheng Xiecff17202018-03-12 19:25:57 +08001980 if (!strcmp(var, "record.call-graph")) {
1981 var = "call-graph.record-mode";
1982 return perf_default_config(var, value, cb);
1983 }
Alexey Budankov93f20c02018-11-06 12:07:19 +03001984#ifdef HAVE_AIO_SUPPORT
1985 if (!strcmp(var, "record.aio")) {
1986 rec->opts.nr_cblocks = strtol(value, NULL, 0);
1987 if (!rec->opts.nr_cblocks)
1988 rec->opts.nr_cblocks = nr_cblocks_default;
1989 }
1990#endif
Jiri Olsaeb853e82014-02-03 12:44:42 +01001991
Yisheng Xiecff17202018-03-12 19:25:57 +08001992 return 0;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001993}
1994
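/*
 * These keys come from the perf config files, e.g. a ~/.perfconfig along
 * these lines (a sketch):
 *
 *	[record]
 *		build-id = no-cache
 *		call-graph = dwarf
 *		aio = 4
 */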
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001995struct clockid_map {
1996 const char *name;
1997 int clockid;
1998};
1999
2000#define CLOCKID_MAP(n, c) \
2001 { .name = n, .clockid = (c), }
2002
2003#define CLOCKID_END { .name = NULL, }
2004
2005
2006/*
 2007 * Define any clockids missing from older system headers; we need to build on many distros...
2008 */
2009#ifndef CLOCK_MONOTONIC_RAW
2010#define CLOCK_MONOTONIC_RAW 4
2011#endif
2012#ifndef CLOCK_BOOTTIME
2013#define CLOCK_BOOTTIME 7
2014#endif
2015#ifndef CLOCK_TAI
2016#define CLOCK_TAI 11
2017#endif
2018
2019static const struct clockid_map clockids[] = {
2020 /* available for all events, NMI safe */
2021 CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
2022 CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),
2023
2024 /* available for some events */
2025 CLOCKID_MAP("realtime", CLOCK_REALTIME),
2026 CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
2027 CLOCKID_MAP("tai", CLOCK_TAI),
2028
2029 /* available for the lazy */
2030 CLOCKID_MAP("mono", CLOCK_MONOTONIC),
2031 CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
2032 CLOCKID_MAP("real", CLOCK_REALTIME),
2033 CLOCKID_MAP("boot", CLOCK_BOOTTIME),
2034
2035 CLOCKID_END,
2036};
2037
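/*
 * parse_clockid() below matches these names case-insensitively and also
 * strips a "CLOCK_" prefix, so 'perf record -k mono', '-k MONOTONIC' and
 * '-k CLOCK_MONOTONIC' all end up selecting CLOCK_MONOTONIC; a raw clockid
 * number is accepted as well.
 */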
Alexey Budankovcf790512018-10-09 17:36:24 +03002038static int get_clockid_res(clockid_t clk_id, u64 *res_ns)
2039{
2040 struct timespec res;
2041
2042 *res_ns = 0;
2043 if (!clock_getres(clk_id, &res))
2044 *res_ns = res.tv_nsec + res.tv_sec * NSEC_PER_SEC;
2045 else
2046 pr_warning("WARNING: Failed to determine specified clock resolution.\n");
2047
2048 return 0;
2049}
2050
Peter Zijlstra814c8c32015-03-31 00:19:31 +02002051static int parse_clockid(const struct option *opt, const char *str, int unset)
2052{
2053 struct record_opts *opts = (struct record_opts *)opt->value;
2054 const struct clockid_map *cm;
2055 const char *ostr = str;
2056
2057 if (unset) {
2058 opts->use_clockid = 0;
2059 return 0;
2060 }
2061
2062 /* no arg passed */
2063 if (!str)
2064 return 0;
2065
2066 /* no setting it twice */
2067 if (opts->use_clockid)
2068 return -1;
2069
2070 opts->use_clockid = true;
2071
 2072	/* if it's a number, we're done */
2073 if (sscanf(str, "%d", &opts->clockid) == 1)
Alexey Budankovcf790512018-10-09 17:36:24 +03002074 return get_clockid_res(opts->clockid, &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02002075
2076 /* allow a "CLOCK_" prefix to the name */
2077 if (!strncasecmp(str, "CLOCK_", 6))
2078 str += 6;
2079
2080 for (cm = clockids; cm->name; cm++) {
2081 if (!strcasecmp(str, cm->name)) {
2082 opts->clockid = cm->clockid;
Alexey Budankovcf790512018-10-09 17:36:24 +03002083 return get_clockid_res(opts->clockid,
2084 &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02002085 }
2086 }
2087
2088 opts->use_clockid = false;
2089 ui__warning("unknown clockid %s, check man page\n", ostr);
2090 return -1;
2091}
2092
Alexey Budankovf4fe11b2019-01-22 20:52:03 +03002093static int record__parse_affinity(const struct option *opt, const char *str, int unset)
2094{
2095 struct record_opts *opts = (struct record_opts *)opt->value;
2096
2097 if (unset || !str)
2098 return 0;
2099
2100 if (!strcasecmp(str, "node"))
2101 opts->affinity = PERF_AFFINITY_NODE;
2102 else if (!strcasecmp(str, "cpu"))
2103 opts->affinity = PERF_AFFINITY_CPU;
2104
2105 return 0;
2106}
2107
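/*
 * e.g. 'perf record --affinity=node ...' migrates the trace reading thread
 * to the NUMA node of each mmap buffer it flushes, while '--affinity=cpu'
 * moves it to that buffer's cpu; the default leaves the thread wherever the
 * scheduler put it.
 */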
Jiwei Sun6d575812019-10-22 16:09:01 +08002108static int parse_output_max_size(const struct option *opt,
2109 const char *str, int unset)
2110{
2111 unsigned long *s = (unsigned long *)opt->value;
2112 static struct parse_tag tags_size[] = {
2113 { .tag = 'B', .mult = 1 },
2114 { .tag = 'K', .mult = 1 << 10 },
2115 { .tag = 'M', .mult = 1 << 20 },
2116 { .tag = 'G', .mult = 1 << 30 },
2117 { .tag = 0 },
2118 };
2119 unsigned long val;
2120
2121 if (unset) {
2122 *s = 0;
2123 return 0;
2124 }
2125
2126 val = parse_tag_value(str, tags_size);
2127 if (val != (unsigned long) -1) {
2128 *s = val;
2129 return 0;
2130 }
2131
2132 return -1;
2133}
2134
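/*
 * e.g. 'perf record --max-size=200M ...' ends the session once more than
 * 200 << 20 bytes have been written to the output file; the tags_size table
 * above also accepts B, K and G suffixes.
 */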
Adrian Huntere9db1312015-04-09 18:53:46 +03002135static int record__parse_mmap_pages(const struct option *opt,
2136 const char *str,
2137 int unset __maybe_unused)
2138{
2139 struct record_opts *opts = opt->value;
2140 char *s, *p;
2141 unsigned int mmap_pages;
2142 int ret;
2143
2144 if (!str)
2145 return -EINVAL;
2146
2147 s = strdup(str);
2148 if (!s)
2149 return -ENOMEM;
2150
2151 p = strchr(s, ',');
2152 if (p)
2153 *p = '\0';
2154
2155 if (*s) {
2156 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
2157 if (ret)
2158 goto out_free;
2159 opts->mmap_pages = mmap_pages;
2160 }
2161
2162 if (!p) {
2163 ret = 0;
2164 goto out_free;
2165 }
2166
2167 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
2168 if (ret)
2169 goto out_free;
2170
2171 opts->auxtrace_mmap_pages = mmap_pages;
2172
2173out_free:
2174 free(s);
2175 return ret;
2176}
2177
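/*
 * The "pages[,pages]" argument is split at the comma: e.g. '-m 512,128'
 * uses 512 pages per data mmap and 128 pages per AUX area tracing mmap.
 * Both halves go through __perf_evlist__parse_mmap_pages(), so size
 * suffixes such as '4M' should work too.
 */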
Jiri Olsa0c582442017-01-09 10:51:59 +01002178static void switch_output_size_warn(struct record *rec)
2179{
Jiri Olsa9521b5f2019-07-28 12:45:35 +02002180 u64 wakeup_size = evlist__mmap_size(rec->opts.mmap_pages);
Jiri Olsa0c582442017-01-09 10:51:59 +01002181 struct switch_output *s = &rec->switch_output;
2182
2183 wakeup_size /= 2;
2184
2185 if (s->size < wakeup_size) {
2186 char buf[100];
2187
2188 unit_number__scnprintf(buf, sizeof(buf), wakeup_size);
 2189		pr_warning("WARNING: switch-output data size is lower than the "
 2190			   "wakeup kernel buffer size (%s), "
 2191			   "expect bigger perf.data sizes\n", buf);
2192 }
2193}
2194
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002195static int switch_output_setup(struct record *rec)
2196{
2197 struct switch_output *s = &rec->switch_output;
Jiri Olsadc0c6122017-01-09 10:51:58 +01002198 static struct parse_tag tags_size[] = {
2199 { .tag = 'B', .mult = 1 },
2200 { .tag = 'K', .mult = 1 << 10 },
2201 { .tag = 'M', .mult = 1 << 20 },
2202 { .tag = 'G', .mult = 1 << 30 },
2203 { .tag = 0 },
2204 };
Jiri Olsabfacbe32017-01-09 10:52:00 +01002205 static struct parse_tag tags_time[] = {
2206 { .tag = 's', .mult = 1 },
2207 { .tag = 'm', .mult = 60 },
2208 { .tag = 'h', .mult = 60*60 },
2209 { .tag = 'd', .mult = 60*60*24 },
2210 { .tag = 0 },
2211 };
Jiri Olsadc0c6122017-01-09 10:51:58 +01002212 unsigned long val;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002213
Arnaldo Carvalho de Melo899e5ff2020-04-27 17:56:37 -03002214 /*
 2215	 * If we're using --switch-output-event, then we imply
2216 * --switch-output=signal, as we'll send a SIGUSR2 from the side band
2217 * thread to its parent.
2218 */
2219 if (rec->switch_output_event_set)
2220 goto do_signal;
2221
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002222 if (!s->set)
2223 return 0;
2224
2225 if (!strcmp(s->str, "signal")) {
Arnaldo Carvalho de Melo899e5ff2020-04-27 17:56:37 -03002226do_signal:
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002227 s->signal = true;
2228 pr_debug("switch-output with SIGUSR2 signal\n");
Jiri Olsadc0c6122017-01-09 10:51:58 +01002229 goto enabled;
2230 }
2231
2232 val = parse_tag_value(s->str, tags_size);
2233 if (val != (unsigned long) -1) {
2234 s->size = val;
2235 pr_debug("switch-output with %s size threshold\n", s->str);
2236 goto enabled;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002237 }
2238
Jiri Olsabfacbe32017-01-09 10:52:00 +01002239 val = parse_tag_value(s->str, tags_time);
2240 if (val != (unsigned long) -1) {
2241 s->time = val;
2242 pr_debug("switch-output with %s time threshold (%lu seconds)\n",
2243 s->str, s->time);
2244 goto enabled;
2245 }
2246
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002247 return -1;
Jiri Olsadc0c6122017-01-09 10:51:58 +01002248
2249enabled:
2250 rec->timestamp_filename = true;
2251 s->enabled = true;
Jiri Olsa0c582442017-01-09 10:51:59 +01002252
2253 if (s->size && !rec->opts.no_buffering)
2254 switch_output_size_warn(rec);
2255
Jiri Olsadc0c6122017-01-09 10:51:58 +01002256 return 0;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002257}
2258
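/*
 * The accepted forms, per the tag tables above (note that 'M' is a size
 * while 'm' is a time):
 *
 *	--switch-output			rotate on SIGUSR2
 *	--switch-output=100M		rotate after ~100 << 20 bytes are written
 *	--switch-output=10m		rotate every 10 minutes (re-armed via alarm())
 */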
Namhyung Kime5b2c202014-10-23 00:15:46 +09002259static const char * const __record_usage[] = {
Mike Galbraith9e0967532009-05-28 16:25:34 +02002260 "perf record [<options>] [<command>]",
2261 "perf record [<options>] -- <command> [<options>]",
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002262 NULL
2263};
Namhyung Kime5b2c202014-10-23 00:15:46 +09002264const char * const *record_usage = __record_usage;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002265
Arnaldo Carvalho de Melo6e0a9b32019-11-14 12:15:34 -03002266static int build_id__process_mmap(struct perf_tool *tool, union perf_event *event,
2267 struct perf_sample *sample, struct machine *machine)
2268{
2269 /*
2270 * We already have the kernel maps, put in place via perf_session__create_kernel_maps()
2271 * no need to add them twice.
2272 */
2273 if (!(event->header.misc & PERF_RECORD_MISC_USER))
2274 return 0;
2275 return perf_event__process_mmap(tool, event, sample, machine);
2276}
2277
2278static int build_id__process_mmap2(struct perf_tool *tool, union perf_event *event,
2279 struct perf_sample *sample, struct machine *machine)
2280{
2281 /*
2282 * We already have the kernel maps, put in place via perf_session__create_kernel_maps()
2283 * no need to add them twice.
2284 */
2285 if (!(event->header.misc & PERF_RECORD_MISC_USER))
2286 return 0;
2287
2288 return perf_event__process_mmap2(tool, event, sample, machine);
2289}
2290
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002291/*
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03002292 * XXX Ideally this would be local to cmd_record() and passed to a record__new,
2293 * because we need access to it in record__exit(), which is called
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002294 * after cmd_record() exits, but since record_options needs to be accessible to
2295 * builtin-script, leave it here.
2296 *
2297 * At least we don't touch it in all the other functions here directly.
2298 *
2299 * Just say no to tons of global variables, sigh.
2300 */
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03002301static struct record record = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002302 .opts = {
Andi Kleen8affc2b2014-07-31 14:45:04 +08002303 .sample_time = true,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002304 .mmap_pages = UINT_MAX,
2305 .user_freq = UINT_MAX,
2306 .user_interval = ULLONG_MAX,
Arnaldo Carvalho de Melo447a6012012-05-22 13:14:18 -03002307 .freq = 4000,
Namhyung Kimd1cb9fc2012-05-16 18:45:49 +09002308 .target = {
2309 .uses_mmap = true,
Adrian Hunter3aa59392013-11-15 15:52:29 +02002310 .default_per_cpu = true,
Namhyung Kimd1cb9fc2012-05-16 18:45:49 +09002311 },
Alexey Budankov470530b2019-03-18 20:40:26 +03002312 .mmap_flush = MMAP_FLUSH_DEFAULT,
Stephane Eraniand99c22e2020-04-22 08:50:38 -07002313 .nr_threads_synthesize = 1,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002314 },
Namhyung Kime3d59112015-01-29 17:06:44 +09002315 .tool = {
2316 .sample = process_sample_event,
2317 .fork = perf_event__process_fork,
Adrian Huntercca84822015-08-19 17:29:21 +03002318 .exit = perf_event__process_exit,
Namhyung Kime3d59112015-01-29 17:06:44 +09002319 .comm = perf_event__process_comm,
Hari Bathinif3b36142017-03-08 02:11:43 +05302320 .namespaces = perf_event__process_namespaces,
Arnaldo Carvalho de Melo6e0a9b32019-11-14 12:15:34 -03002321 .mmap = build_id__process_mmap,
2322 .mmap2 = build_id__process_mmap2,
Adrian Huntercca84822015-08-19 17:29:21 +03002323 .ordered_events = true,
Namhyung Kime3d59112015-01-29 17:06:44 +09002324 },
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002325};
Frederic Weisbecker7865e812010-04-14 19:42:07 +02002326
Namhyung Kim76a26542015-10-22 23:28:32 +09002327const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
2328 "\n\t\t\t\tDefault: fp";
Arnaldo Carvalho de Melo61eaa3b2012-10-01 15:20:58 -03002329
Wang Nan0aab2132016-06-16 08:02:41 +00002330static bool dry_run;
2331
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002332/*
2333 * XXX Will stay a global variable until we fix builtin-script.c to stop messing
2334 * with it and switch to using the library functions in perf_evlist that came
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03002335 * from builtin-record.c, i.e. use record_opts,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002336 * perf_evlist__prepare_workload, etc instead of fork+exec'ing 'perf record',
2337 * using pipes, etc.
2338 */
Jiri Olsaefd21302017-01-03 09:19:55 +01002339static struct option __record_options[] = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002340 OPT_CALLBACK('e', "event", &record.evlist, "event",
Thomas Gleixner86847b62009-06-06 12:24:17 +02002341 "event selector. use 'perf list' to list available events",
Jiri Olsaf120f9d2011-07-14 11:25:32 +02002342 parse_events_option),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002343 OPT_CALLBACK(0, "filter", &record.evlist, "filter",
Li Zefanc171b552009-10-15 11:22:07 +08002344 "event filter", parse_filter),
Wang Nan4ba1faa2015-07-10 07:36:10 +00002345 OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
2346 NULL, "don't record events from perf itself",
2347 exclude_perf),
Namhyung Kimbea03402012-04-26 14:15:15 +09002348 OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03002349 "record events on existing process id"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002350 OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03002351 "record events on existing thread id"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002352 OPT_INTEGER('r', "realtime", &record.realtime_prio,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002353 "collect data with this RT SCHED_FIFO priority"),
Arnaldo Carvalho de Melo509051e2014-01-14 17:52:14 -03002354 OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
Kirill Smelkovacac03f2011-01-12 17:59:36 +03002355 "collect data without buffering"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002356 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
Frederic Weisbeckerdaac07b2009-08-13 10:27:19 +02002357 "collect raw sample records from all opened counters"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002358 OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002359 "system-wide collection from all CPUs"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002360 OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
Stephane Eranianc45c6ea2010-05-28 12:00:01 +02002361 "list of cpus to monitor"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002362 OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
Jiri Olsa2d4f2792019-02-21 10:41:30 +01002363 OPT_STRING('o', "output", &record.data.path, "file",
Ingo Molnarabaff322009-06-02 22:59:57 +02002364 "output file name"),
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02002365 OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
2366 &record.opts.no_inherit_set,
2367 "child tasks do not inherit counters"),
Wang Nan4ea648a2016-07-14 08:34:47 +00002368 OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
2369 "synthesize non-sample events at the end of output"),
Wang Nan626a6b72016-07-14 08:34:45 +00002370 OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
Song Liu71184c62019-03-11 22:30:37 -07002371	OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "do not record bpf events"),
Arnaldo Carvalho de Melob09c2362018-03-01 14:52:50 -03002372 OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
2373 "Fail if the specified frequency can't be used"),
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03002374 OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
2375 "profile at this frequency",
2376 record__parse_freq),
Adrian Huntere9db1312015-04-09 18:53:46 +03002377 OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
2378 "number of mmap data pages and AUX area tracing mmap pages",
2379 record__parse_mmap_pages),
Alexey Budankov470530b2019-03-18 20:40:26 +03002380 OPT_CALLBACK(0, "mmap-flush", &record.opts, "number",
2381 "Minimal number of bytes that is extracted from mmap data pages (default: 1)",
2382 record__mmap_flush_parse),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002383 OPT_BOOLEAN(0, "group", &record.opts.group,
Lin Ming43bece72011-08-17 18:42:07 +08002384 "put the counters into a counter group"),
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03002385 OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002386 NULL, "enables call-graph recording" ,
2387 &record_callchain_opt),
2388 OPT_CALLBACK(0, "call-graph", &record.opts,
Namhyung Kim76a26542015-10-22 23:28:32 +09002389 "record_mode[,record_size]", record_callchain_help,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002390 &record_parse_callchain_opt),
Ian Munsiec0555642010-04-13 18:37:33 +10002391 OPT_INCR('v', "verbose", &verbose,
Ingo Molnar3da297a2009-06-07 17:39:02 +02002392 "be more verbose (show counter open errors, etc)"),
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02002393 OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002394 OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02002395 "per thread counts"),
Peter Zijlstra56100322015-06-10 16:48:50 +02002396 OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
Kan Liang3b0a5da2017-08-29 13:11:08 -04002397 OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
2398 "Record the sample physical addresses"),
Jiri Olsab6f35ed2016-08-01 20:02:35 +02002399 OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
Adrian Hunter3abebc52015-07-06 14:51:01 +03002400 OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
2401 &record.opts.sample_time_set,
2402 "Record the sample timestamps"),
Jiri Olsaf290aa12018-02-01 09:38:11 +01002403 OPT_BOOLEAN_SET('P', "period", &record.opts.period, &record.opts.period_set,
2404 "Record the sample period"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002405 OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02002406 "don't sample"),
Wang Nand2db9a92016-01-25 09:56:19 +00002407 OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
2408 &record.no_buildid_cache_set,
2409 "do not update the buildid cache"),
2410 OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
2411 &record.no_buildid_set,
2412 "do not collect buildids in perf.data"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002413 OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
Stephane Eranian023695d2011-02-14 11:20:01 +02002414 "monitor event in cgroup name only",
2415 parse_cgroups),
Arnaldo Carvalho de Meloa6205a32014-01-14 17:58:12 -03002416 OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
Andi Kleen6619a532014-01-11 13:38:27 -08002417 "ms to wait before starting measurement after program start"),
Adrian Huntereeb399b2019-10-04 11:31:21 +03002418 OPT_BOOLEAN(0, "kcore", &record.opts.kcore, "copy /proc/kcore"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002419 OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
2420 "user to profile"),
Stephane Eraniana5aabda2012-03-08 23:47:45 +01002421
2422 OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
2423 "branch any", "sample any taken branches",
2424 parse_branch_stack),
2425
2426 OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
2427 "branch filter mask", "branch stack filter modes",
Roberto Agostino Vitillobdfebd82012-02-09 23:21:02 +01002428 parse_branch_stack),
Andi Kleen05484292013-01-24 16:10:29 +01002429 OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
2430 "sample by weight (on special events only)"),
Andi Kleen475eeab2013-09-20 07:40:43 -07002431 OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
2432 "sample transaction flags (special events only)"),
Adrian Hunter3aa59392013-11-15 15:52:29 +02002433 OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
2434 "use per-thread mmaps"),
Stephane Eranianbcc84ec2015-08-31 18:41:12 +02002435 OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
2436 "sample selected machine registers on interrupt,"
Kan Liangaeea9062019-05-14 13:19:32 -07002437 " use '-I?' to list register names", parse_intr_regs),
Andi Kleen84c41742017-09-05 10:00:28 -07002438 OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
2439 "sample selected machine registers on interrupt,"
Kan Liangaeea9062019-05-14 13:19:32 -07002440 " use '--user-regs=?' to list register names", parse_user_regs),
Andi Kleen85c273d2015-02-24 15:13:40 -08002441 OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
2442 "Record running/enabled time of read (:S) events"),
Peter Zijlstra814c8c32015-03-31 00:19:31 +02002443 OPT_CALLBACK('k', "clockid", &record.opts,
2444 "clockid", "clockid to use for events, see clock_gettime()",
2445 parse_clockid),
	OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
			  "opts", "AUX area tracing Snapshot Mode", ""),
	OPT_STRING_OPTARG(0, "aux-sample", &record.opts.auxtrace_sample_opts,
			  "opts", "sample AUX area", ""),
	OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
		     "per thread proc mmap processing timeout in ms"),
	OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
		    "Record namespaces events"),
	OPT_BOOLEAN(0, "all-cgroups", &record.opts.record_cgroup,
		    "Record cgroup events"),
	OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
		    "Record context switch events"),
	OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
			 "Configure all used events to run in kernel space.",
			 PARSE_OPT_EXCLUSIVE),
	OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
			 "Configure all used events to run in user space.",
			 PARSE_OPT_EXCLUSIVE),
	OPT_BOOLEAN(0, "kernel-callchains", &record.opts.kernel_callchains,
		    "collect kernel callchains"),
	OPT_BOOLEAN(0, "user-callchains", &record.opts.user_callchains,
		    "collect user callchains"),
	OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
		   "clang binary to use for compiling BPF scriptlets"),
	OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
		   "options passed to clang when compiling BPF scriptlets"),
	OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
		    "Record build-id of all DSOs regardless of hits"),
	OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
		    "append timestamp to output filename"),
	OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
		    "Record timestamp boundary (time of first/last samples)"),
	OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
			  &record.switch_output.set, "signal or size[BKMG] or time[smhd]",
			  "Switch output when receiving SIGUSR2 (signal) or cross a size or time threshold",
			  "signal"),
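	/*
	 * The three trigger flavours accepted above, for example:
	 *
	 *   perf record --switch-output        rotate output on SIGUSR2
	 *   perf record --switch-output=1G     rotate when 1G has been written
	 *   perf record --switch-output=30s    rotate every 30 seconds
	 *
	 * The string is parsed later by switch_output_setup().
	 */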
	OPT_CALLBACK_SET(0, "switch-output-event", &record.sb_evlist, &record.switch_output_event_set, "switch output event",
			 "switch output event selector. use 'perf list' to list available events",
			 parse_events_option_new_evlist),
	OPT_INTEGER(0, "switch-max-files", &record.switch_output.num_files,
		    "Limit number of switch output generated files"),
	OPT_BOOLEAN(0, "dry-run", &dry_run,
		    "Parse options then exit"),
#ifdef HAVE_AIO_SUPPORT
	OPT_CALLBACK_OPTARG(0, "aio", &record.opts,
			    &nr_cblocks_default, "n", "Use <n> control blocks in asynchronous trace writing mode (default: 1, max: 4)",
			    record__aio_parse),
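	/* e.g. 'perf record --aio' (one control block) or 'perf record --aio=4' */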
#endif
	OPT_CALLBACK(0, "affinity", &record.opts, "node|cpu",
		     "Set affinity mask of trace reading thread to NUMA node cpu mask or cpu of processed mmap buffer",
		     record__parse_affinity),
#ifdef HAVE_ZSTD_SUPPORT
	OPT_CALLBACK_OPTARG('z', "compression-level", &record.opts, &comp_level_default,
			    "n", "Compress records using the specified level (default: 1 - fastest compression, 22 - greatest compression)",
			    record__parse_comp_level),
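	/*
	 * e.g. 'perf record -z' (default level 1) or
	 * 'perf record --compression-level=22'; only available when perf is
	 * built with Zstandard support (HAVE_ZSTD_SUPPORT).
	 */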
#endif
	OPT_CALLBACK(0, "max-size", &record.output_max_size,
		     "size", "Limit the maximum size of the output file", parse_output_max_size),
	OPT_UINTEGER(0, "num-thread-synthesize",
		     &record.opts.nr_threads_synthesize,
		     "number of threads to run for event synthesis"),
	OPT_END()
};

struct option *record_options = __record_options;

int cmd_record(int argc, const char **argv)
{
	int err;
	struct record *rec = &record;
	char errbuf[BUFSIZ];

	setlocale(LC_ALL, "");

#ifndef HAVE_LIBBPF_SUPPORT
# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
	set_nobuild('\0', "clang-path", true);
	set_nobuild('\0', "clang-opt", true);
# undef set_nobuild
#endif

#ifndef HAVE_BPF_PROLOGUE
# if !defined (HAVE_DWARF_SUPPORT)
#  define REASON  "NO_DWARF=1"
# elif !defined (HAVE_LIBBPF_SUPPORT)
#  define REASON  "NO_LIBBPF=1"
# else
#  define REASON  "this architecture doesn't support BPF prologue"
# endif
# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
	set_nobuild('\0', "vmlinux", true);
# undef set_nobuild
# undef REASON
#endif

	rec->opts.affinity = PERF_AFFINITY_SYS;

	rec->evlist = evlist__new();
	if (rec->evlist == NULL)
		return -ENOMEM;

	err = perf_config(perf_record_config, rec);
	if (err)
		return err;

	argc = parse_options(argc, argv, record_options, record_usage,
			    PARSE_OPT_STOP_AT_NON_OPTION);
	if (quiet)
		perf_quiet_option();

	/* Make system wide (-a) the default target. */
	if (!argc && target__none(&rec->opts.target))
		rec->opts.target.system_wide = true;

	if (nr_cgroups && !rec->opts.target.system_wide) {
		usage_with_options_msg(record_usage, record_options,
			"cgroup monitoring only available in system-wide mode");
	}

	if (rec->opts.kcore)
		rec->data.is_dir = true;

	if (rec->opts.comp_level != 0) {
		pr_debug("Compression enabled, disabling build id collection at the end of the session.\n");
		rec->no_buildid = true;
	}

	if (rec->opts.record_switch_events &&
	    !perf_can_record_switch_events()) {
		ui__error("kernel does not support recording context switch events\n");
		parse_options_usage(record_usage, record_options, "switch-events", 0);
		return -EINVAL;
	}

	if (switch_output_setup(rec)) {
		parse_options_usage(record_usage, record_options, "switch-output", 0);
		return -EINVAL;
	}

	if (rec->switch_output.time) {
		signal(SIGALRM, alarm_sig_handler);
		alarm(rec->switch_output.time);
	}
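	/*
	 * alarm(2) arms a one-shot timer. alarm_sig_handler() (at the bottom
	 * of this file) only fires the switch-output trigger, so the timer is
	 * expected to be re-armed after each output switch to keep the
	 * rotation periodic.
	 */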

	if (rec->switch_output.num_files) {
		rec->switch_output.filenames = calloc(rec->switch_output.num_files,
						      sizeof(char *));
		if (!rec->switch_output.filenames)
			return -ENOMEM;
	}
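	/*
	 * The filenames array allocated above keeps the names of the rotated
	 * output files, presumably so that the oldest one can be removed once
	 * more than switch_output.num_files of them exist.
	 */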

	/*
	 * Allow aliases to facilitate the lookup of symbols for address
	 * filters. Refer to auxtrace_parse_filters().
	 */
	symbol_conf.allow_aliases = true;

	symbol__init(NULL);

	if (rec->opts.affinity != PERF_AFFINITY_SYS) {
		rec->affinity_mask.nbits = cpu__max_cpu();
		rec->affinity_mask.bits = bitmap_alloc(rec->affinity_mask.nbits);
		if (!rec->affinity_mask.bits) {
			pr_err("Failed to allocate thread mask for %zd cpus\n", rec->affinity_mask.nbits);
			return -ENOMEM;
		}
		pr_debug2("thread mask[%zd]: empty\n", rec->affinity_mask.nbits);
	}

	err = record__auxtrace_init(rec);
	if (err)
		goto out;

	if (dry_run)
		goto out;

	err = bpf__setup_stdout(rec->evlist);
	if (err) {
		bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
		pr_err("ERROR: Setup BPF stdout failed: %s\n",
		       errbuf);
		goto out;
	}

	err = -ENOMEM;

	if (rec->no_buildid_cache || rec->no_buildid) {
		disable_buildid_cache();
	} else if (rec->switch_output.enabled) {
		/*
		 * In 'perf record --switch-output', disable buildid
		 * generation by default to reduce data file switching
		 * overhead. Still generate buildids if they are required
		 * explicitly, using:
		 *
		 *  perf record --switch-output --no-no-buildid \
		 *              --no-no-buildid-cache
		 *
		 * The following code is equivalent to:
		 *
		 * if ((rec->no_buildid || !rec->no_buildid_set) &&
		 *     (rec->no_buildid_cache || !rec->no_buildid_cache_set))
		 *         disable_buildid_cache();
		 */
		bool disable = true;

		if (rec->no_buildid_set && !rec->no_buildid)
			disable = false;
		if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
			disable = false;
		if (disable) {
			rec->no_buildid = true;
			rec->no_buildid_cache = true;
			disable_buildid_cache();
		}
	}

	if (record.opts.overwrite)
		record.opts.tail_synthesize = true;
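	/*
	 * In overwrite mode the ring buffer keeps only the most recent
	 * samples, so synthesis of side-band events (mmap, comm, ...) is
	 * deferred to the end of the session, where it describes the state
	 * the surviving samples were taken in.
	 */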

	if (rec->evlist->core.nr_entries == 0 &&
	    __perf_evlist__add_default(rec->evlist, !record.opts.no_samples) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out;
	}

	if (rec->opts.target.tid && !rec->opts.no_inherit_set)
		rec->opts.no_inherit = true;

	err = target__validate(&rec->opts.target);
	if (err) {
		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s\n", errbuf);
	}

	err = target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;

		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out;
	}

	/* Enable ignoring missing threads when the -u/-p option is given. */
	rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;

	err = -ENOMEM;
	if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
	if (err)
		goto out;

	/*
	 * We take all buildids when the file contains AUX area tracing data
	 * because we do not decode the trace, which would take too long.
	 */
	if (rec->opts.full_auxtrace)
		rec->buildid_all = true;

	if (record_opts__config(&rec->opts)) {
		err = -EINVAL;
		goto out;
	}

	if (rec->opts.nr_cblocks > nr_cblocks_max)
		rec->opts.nr_cblocks = nr_cblocks_max;
	pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks);

	pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
	pr_debug("mmap flush: %d\n", rec->opts.mmap_flush);

	if (rec->opts.comp_level > comp_level_max)
		rec->opts.comp_level = comp_level_max;
	pr_debug("comp level: %d\n", rec->opts.comp_level);

	err = __cmd_record(&record, argc, argv);
out:
	bitmap_free(rec->affinity_mask.bits);
	evlist__delete(rec->evlist);
	symbol__exit();
	auxtrace_record__free(rec->itr);
	return err;
}

static void snapshot_sig_handler(int sig __maybe_unused)
{
	struct record *rec = &record;

	if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
		trigger_hit(&auxtrace_snapshot_trigger);
		auxtrace_record__snapshot_started = 1;
		if (auxtrace_record__snapshot_start(record.itr))
			trigger_error(&auxtrace_snapshot_trigger);
	}

	if (switch_output_signal(rec))
		trigger_hit(&switch_output_trigger);
}

static void alarm_sig_handler(int sig __maybe_unused)
{
	struct record *rec = &record;

	if (switch_output_time(rec))
		trigger_hit(&switch_output_trigger);
}