blob: 7e18a454924c67c002776a12d1abf4b3d69d2cf0 [file] [log] [blame]
/* SPDX-License-Identifier: GPL-2.0
 * Copyright(c) 2017 Jesper Dangaard Brouer, Red Hat, Inc.
 */
/* Tool description; printed by usage() and as header by stats_poll() */
static const char *__doc__=
	"XDP monitor tool, based on tracepoints\n"
;

/* Notice printed when only redirect errors are tracked (the default) */
static const char *__doc_err_only__=
	" NOTICE: Only tracking XDP redirect errors\n"
	" Enable TX success stats via '--stats'\n"
	" (which comes with a per packet processing overhead)\n"
;
13
14#include <errno.h>
15#include <stdio.h>
16#include <stdlib.h>
17#include <stdbool.h>
18#include <stdint.h>
19#include <string.h>
20#include <ctype.h>
21#include <unistd.h>
22#include <locale.h>
23
Jesper Dangaard Brouerc4eb7f42017-10-06 10:41:51 +020024#include <sys/resource.h>
Jesper Dangaard Brouer3ffab542017-08-29 16:38:11 +020025#include <getopt.h>
26#include <net/if.h>
27#include <time.h>
28
Jakub Kicinski2bf3e2e2018-05-14 22:35:02 -070029#include <bpf/bpf.h>
Jesper Dangaard Brouer3ffab542017-08-29 16:38:11 +020030#include "bpf_load.h"
31#include "bpf_util.h"
32
/* Output verbosity; debug is toggled via --debug/-D */
static int verbose = 1;
static bool debug = false;

/* Command line options, parsed by getopt_long() in main().
 * Table is also walked by usage() to self-document the tool.
 */
static const struct option long_options[] = {
	{"help",	no_argument,		NULL, 'h' },
	{"debug",	no_argument,		NULL, 'D' },
	{"stats",	no_argument,		NULL, 'S' },
	{"sec",		required_argument,	NULL, 's' },
	{0, 0, NULL,  0 }
};

/* C standard specifies two constants, EXIT_SUCCESS(0) and EXIT_FAILURE(1) */
#define EXIT_FAIL_MEM	5
46
Jesper Dangaard Brouer3ffab542017-08-29 16:38:11 +020047static void usage(char *argv[])
48{
49 int i;
50 printf("\nDOCUMENTATION:\n%s\n", __doc__);
51 printf("\n");
52 printf(" Usage: %s (options-see-below)\n",
53 argv[0]);
54 printf(" Listing options:\n");
55 for (i = 0; long_options[i].name != 0; i++) {
56 printf(" --%-15s", long_options[i].name);
57 if (long_options[i].flag != NULL)
58 printf(" flag (internal value:%d)",
59 *long_options[i].flag);
60 else
Prashant Bhole53ea24c2018-05-14 17:29:15 +090061 printf("short-option: -%c",
Jesper Dangaard Brouer3ffab542017-08-29 16:38:11 +020062 long_options[i].val);
63 printf("\n");
64 }
65 printf("\n");
66}
67
68#define NANOSEC_PER_SEC 1000000000 /* 10^9 */
Stephen Hemminger09295672017-10-01 14:07:34 -070069static __u64 gettime(void)
Jesper Dangaard Brouer3ffab542017-08-29 16:38:11 +020070{
71 struct timespec t;
72 int res;
73
74 res = clock_gettime(CLOCK_MONOTONIC, &t);
75 if (res < 0) {
76 fprintf(stderr, "Error with gettimeofday! (%i)\n", res);
77 exit(EXIT_FAILURE);
78 }
79 return (__u64) t.tv_sec * NANOSEC_PER_SEC + t.tv_nsec;
80}
81
/* Redirect tracepoint result codes (matches _kern.c usage) */
enum {
	REDIR_SUCCESS = 0,
	REDIR_ERROR = 1,
};
#define REDIR_RES_MAX 2
static const char *redir_names[REDIR_RES_MAX] = {
	[REDIR_SUCCESS]	= "Success",
	[REDIR_ERROR]	= "Error",
};
/* Map a redirect result code to a printable name.
 * Returns NULL for out-of-range codes.  Fix: also reject negative
 * values; the old check (err < REDIR_RES_MAX) allowed a negative
 * index into redir_names[] (out-of-bounds read).
 */
static const char *err2str(int err)
{
	if (err >= 0 && err < REDIR_RES_MAX)
		return redir_names[err];
	return NULL;
}
/* enum xdp_action */
/* Fix: parenthesize macro expansion (was: XDP_REDIRECT + 1, which is
 * fragile when the macro appears inside a larger expression).
 */
#define XDP_UNKNOWN	(XDP_REDIRECT + 1)
#define XDP_ACTION_MAX (XDP_UNKNOWN + 1)
static const char *xdp_action_names[XDP_ACTION_MAX] = {
	[XDP_ABORTED]	= "XDP_ABORTED",
	[XDP_DROP]	= "XDP_DROP",
	[XDP_PASS]	= "XDP_PASS",
	[XDP_TX]	= "XDP_TX",
	[XDP_REDIRECT]	= "XDP_REDIRECT",
	[XDP_UNKNOWN]	= "XDP_UNKNOWN",
};
/* Map an XDP action code to a printable name.
 * Returns NULL for out-of-range codes.  Fix: also reject negative
 * values; the old check (action < XDP_ACTION_MAX) allowed a negative
 * index into xdp_action_names[] (out-of-bounds read).
 */
static const char *action2str(int action)
{
	if (action >= 0 && action < XDP_ACTION_MAX)
		return xdp_action_names[action];
	return NULL;
}
Jesper Dangaard Brouer3ffab542017-08-29 16:38:11 +0200114
/* Common stats data record shared with _kern.c
 * NOTE: layout must stay in sync with the BPF-side struct; the percpu
 * map values are read raw into this type by map_collect_record().
 */
struct datarec {
	__u64 processed;
	__u64 dropped;
	__u64 info;
};
/* Must match MAX_CPUS used for the cpumap_enqueue_cnt map in _kern.c */
#define MAX_CPUS 64

/* Userspace structs for collection of stats from maps */
struct record {
	__u64 timestamp;	/* ns timestamp taken at map read (gettime) */
	struct datarec total;	/* sum over all CPUs */
	struct datarec *cpu;	/* per-CPU values, alloc'ed for nr_cpus */
};
struct u64rec {
	__u64 processed;
};
struct record_u64 {
	/* record for _kern side __u64 values */
	__u64 timestamp;
	struct u64rec total;
	struct u64rec *cpu;
};

/* Double-buffered snapshot of all tracepoint stats; two instances are
 * swapped each interval so rates can be computed as (new - old) / period.
 */
struct stats_record {
	struct record_u64 xdp_redirect[REDIR_RES_MAX];
	struct record_u64 xdp_exception[XDP_ACTION_MAX];
	struct record xdp_cpumap_kthread;
	struct record xdp_cpumap_enqueue[MAX_CPUS];
	struct record xdp_devmap_xmit;
};
146
Jesper Dangaard Brouer417f1d92018-01-19 17:15:50 +0100147static bool map_collect_record(int fd, __u32 key, struct record *rec)
Jesper Dangaard Brouer3ffab542017-08-29 16:38:11 +0200148{
Jesper Dangaard Brouer417f1d92018-01-19 17:15:50 +0100149 /* For percpu maps, userspace gets a value per possible CPU */
150 unsigned int nr_cpus = bpf_num_possible_cpus();
151 struct datarec values[nr_cpus];
152 __u64 sum_processed = 0;
153 __u64 sum_dropped = 0;
154 __u64 sum_info = 0;
155 int i;
Jesper Dangaard Brouer3ffab542017-08-29 16:38:11 +0200156
Jesper Dangaard Brouer417f1d92018-01-19 17:15:50 +0100157 if ((bpf_map_lookup_elem(fd, &key, values)) != 0) {
158 fprintf(stderr,
159 "ERR: bpf_map_lookup_elem failed key:0x%X\n", key);
160 return false;
161 }
162 /* Get time as close as possible to reading map contents */
163 rec->timestamp = gettime();
164
165 /* Record and sum values from each CPU */
166 for (i = 0; i < nr_cpus; i++) {
167 rec->cpu[i].processed = values[i].processed;
168 sum_processed += values[i].processed;
169 rec->cpu[i].dropped = values[i].dropped;
170 sum_dropped += values[i].dropped;
171 rec->cpu[i].info = values[i].info;
172 sum_info += values[i].info;
173 }
174 rec->total.processed = sum_processed;
175 rec->total.dropped = sum_dropped;
176 rec->total.info = sum_info;
177 return true;
178}
179
180static bool map_collect_record_u64(int fd, __u32 key, struct record_u64 *rec)
181{
182 /* For percpu maps, userspace gets a value per possible CPU */
183 unsigned int nr_cpus = bpf_num_possible_cpus();
184 struct u64rec values[nr_cpus];
185 __u64 sum_total = 0;
186 int i;
187
188 if ((bpf_map_lookup_elem(fd, &key, values)) != 0) {
189 fprintf(stderr,
190 "ERR: bpf_map_lookup_elem failed key:0x%X\n", key);
191 return false;
192 }
193 /* Get time as close as possible to reading map contents */
194 rec->timestamp = gettime();
195
196 /* Record and sum values from each CPU */
197 for (i = 0; i < nr_cpus; i++) {
198 rec->cpu[i].processed = values[i].processed;
199 sum_total += values[i].processed;
200 }
201 rec->total.processed = sum_total;
202 return true;
Jesper Dangaard Brouer280b0582017-10-06 10:41:46 +0200203}
204
205static double calc_period(struct record *r, struct record *p)
206{
207 double period_ = 0;
208 __u64 period = 0;
209
210 period = r->timestamp - p->timestamp;
211 if (period > 0)
212 period_ = ((double) period / NANOSEC_PER_SEC);
213
214 return period_;
215}
216
Jesper Dangaard Brouer417f1d92018-01-19 17:15:50 +0100217static double calc_period_u64(struct record_u64 *r, struct record_u64 *p)
218{
219 double period_ = 0;
220 __u64 period = 0;
221
222 period = r->timestamp - p->timestamp;
223 if (period > 0)
224 period_ = ((double) period / NANOSEC_PER_SEC);
225
226 return period_;
227}
228
229static double calc_pps(struct datarec *r, struct datarec *p, double period)
Jesper Dangaard Brouer280b0582017-10-06 10:41:46 +0200230{
231 __u64 packets = 0;
232 double pps = 0;
233
234 if (period > 0) {
Jesper Dangaard Brouer417f1d92018-01-19 17:15:50 +0100235 packets = r->processed - p->processed;
Jesper Dangaard Brouer280b0582017-10-06 10:41:46 +0200236 pps = packets / period;
237 }
238 return pps;
Jesper Dangaard Brouer3ffab542017-08-29 16:38:11 +0200239}
240
Jesper Dangaard Brouer417f1d92018-01-19 17:15:50 +0100241static double calc_pps_u64(struct u64rec *r, struct u64rec *p, double period)
242{
243 __u64 packets = 0;
244 double pps = 0;
245
246 if (period > 0) {
247 packets = r->processed - p->processed;
248 pps = packets / period;
249 }
250 return pps;
251}
252
253static double calc_drop(struct datarec *r, struct datarec *p, double period)
254{
255 __u64 packets = 0;
256 double pps = 0;
257
258 if (period > 0) {
259 packets = r->dropped - p->dropped;
260 pps = packets / period;
261 }
262 return pps;
263}
264
265static double calc_info(struct datarec *r, struct datarec *p, double period)
266{
267 __u64 packets = 0;
268 double pps = 0;
269
270 if (period > 0) {
271 packets = r->info - p->info;
272 pps = packets / period;
273 }
274 return pps;
275}
276
/* Pretty-print one interval of stats: the difference between the new
 * (stats_rec) and previous (stats_prev) snapshots, converted to rates.
 * When err_only is set, only the redirect error row is shown.
 * Uses the %' (thousands separator) printf flag; stats_poll() sets
 * LC_NUMERIC accordingly.
 */
static void stats_print(struct stats_record *stats_rec,
			struct stats_record *stats_prev,
			bool err_only)
{
	unsigned int nr_cpus = bpf_num_possible_cpus();
	int rec_i = 0, i, to_cpu;
	double t = 0, pps = 0;

	/* Header */
	printf("%-15s %-7s %-12s %-12s %-9s\n",
	       "XDP-event", "CPU:to", "pps", "drop-pps", "extra-info");

	/* tracepoint: xdp:xdp_redirect_* */
	if (err_only)
		rec_i = REDIR_ERROR; /* skip the success row */

	for (; rec_i < REDIR_RES_MAX; rec_i++) {
		struct record_u64 *rec, *prev;
		char *fmt1 = "%-15s %-7d %'-12.0f %'-12.0f %s\n";
		char *fmt2 = "%-15s %-7s %'-12.0f %'-12.0f %s\n";

		rec = &stats_rec->xdp_redirect[rec_i];
		prev = &stats_prev->xdp_redirect[rec_i];
		t = calc_period_u64(rec, prev);

		for (i = 0; i < nr_cpus; i++) {
			struct u64rec *r = &rec->cpu[i];
			struct u64rec *p = &prev->cpu[i];

			pps = calc_pps_u64(r, p, t);
			/* Success goes in the pps column, errors in
			 * the drop-pps column (rec_i != 0 == error)
			 */
			if (pps > 0)
				printf(fmt1, "XDP_REDIRECT", i,
				       rec_i ? 0.0: pps, rec_i ? pps : 0.0,
				       err2str(rec_i));
		}
		pps = calc_pps_u64(&rec->total, &prev->total, t);
		printf(fmt2, "XDP_REDIRECT", "total",
		       rec_i ? 0.0: pps, rec_i ? pps : 0.0, err2str(rec_i));
	}

	/* tracepoint: xdp:xdp_exception */
	for (rec_i = 0; rec_i < XDP_ACTION_MAX; rec_i++) {
		struct record_u64 *rec, *prev;
		char *fmt1 = "%-15s %-7d %'-12.0f %'-12.0f %s\n";
		char *fmt2 = "%-15s %-7s %'-12.0f %'-12.0f %s\n";

		rec = &stats_rec->xdp_exception[rec_i];
		prev = &stats_prev->xdp_exception[rec_i];
		t = calc_period_u64(rec, prev);

		for (i = 0; i < nr_cpus; i++) {
			struct u64rec *r = &rec->cpu[i];
			struct u64rec *p = &prev->cpu[i];

			pps = calc_pps_u64(r, p, t);
			if (pps > 0)
				printf(fmt1, "Exception", i,
				       0.0, pps, action2str(rec_i));
		}
		pps = calc_pps_u64(&rec->total, &prev->total, t);
		if (pps > 0)
			printf(fmt2, "Exception", "total",
			       0.0, pps, action2str(rec_i));
	}

	/* cpumap enqueue stats */
	for (to_cpu = 0; to_cpu < MAX_CPUS; to_cpu++) {
		char *fmt1 = "%-15s %3d:%-3d %'-12.0f %'-12.0f %'-10.2f %s\n";
		char *fmt2 = "%-15s %3s:%-3d %'-12.0f %'-12.0f %'-10.2f %s\n";
		struct record *rec, *prev;
		char *info_str = "";
		double drop, info;

		rec = &stats_rec->xdp_cpumap_enqueue[to_cpu];
		prev = &stats_prev->xdp_cpumap_enqueue[to_cpu];
		t = calc_period(rec, prev);
		for (i = 0; i < nr_cpus; i++) {
			struct datarec *r = &rec->cpu[i];
			struct datarec *p = &prev->cpu[i];

			pps = calc_pps(r, p, t);
			drop = calc_drop(r, p, t);
			info = calc_info(r, p, t);
			/* NOTE(review): info_str is not reset per CPU, so a
			 * later CPU with info==0 reuses "bulk-average";
			 * harmless in practice since info prints as 0.
			 */
			if (info > 0) {
				info_str = "bulk-average";
				info = pps / info; /* calc average bulk size */
			}
			if (pps > 0)
				printf(fmt1, "cpumap-enqueue",
				       i, to_cpu, pps, drop, info, info_str);
		}
		pps = calc_pps(&rec->total, &prev->total, t);
		if (pps > 0) {
			drop = calc_drop(&rec->total, &prev->total, t);
			info = calc_info(&rec->total, &prev->total, t);
			if (info > 0) {
				info_str = "bulk-average";
				info = pps / info; /* calc average bulk size */
			}
			printf(fmt2, "cpumap-enqueue",
			       "sum", to_cpu, pps, drop, info, info_str);
		}
	}

	/* cpumap kthread stats */
	{
		char *fmt1 = "%-15s %-7d %'-12.0f %'-12.0f %'-10.0f %s\n";
		char *fmt2 = "%-15s %-7s %'-12.0f %'-12.0f %'-10.0f %s\n";
		struct record *rec, *prev;
		double drop, info;
		char *i_str = "";

		rec = &stats_rec->xdp_cpumap_kthread;
		prev = &stats_prev->xdp_cpumap_kthread;
		t = calc_period(rec, prev);
		for (i = 0; i < nr_cpus; i++) {
			struct datarec *r = &rec->cpu[i];
			struct datarec *p = &prev->cpu[i];

			pps = calc_pps(r, p, t);
			drop = calc_drop(r, p, t);
			/* info counts kthread schedule events here */
			info = calc_info(r, p, t);
			if (info > 0)
				i_str = "sched";
			if (pps > 0 || drop > 0)
				printf(fmt1, "cpumap-kthread",
				       i, pps, drop, info, i_str);
		}
		pps = calc_pps(&rec->total, &prev->total, t);
		drop = calc_drop(&rec->total, &prev->total, t);
		info = calc_info(&rec->total, &prev->total, t);
		if (info > 0)
			i_str = "sched-sum";
		printf(fmt2, "cpumap-kthread", "total", pps, drop, info, i_str);
	}

	/* devmap ndo_xdp_xmit stats */
	{
		char *fmt1 = "%-15s %-7d %'-12.0f %'-12.0f %'-10.2f %s\n";
		char *fmt2 = "%-15s %-7s %'-12.0f %'-12.0f %'-10.2f %s\n";
		struct record *rec, *prev;
		double drop, info;
		char *i_str = "";

		rec = &stats_rec->xdp_devmap_xmit;
		prev = &stats_prev->xdp_devmap_xmit;
		t = calc_period(rec, prev);
		for (i = 0; i < nr_cpus; i++) {
			struct datarec *r = &rec->cpu[i];
			struct datarec *p = &prev->cpu[i];

			pps = calc_pps(r, p, t);
			drop = calc_drop(r, p, t);
			/* info counts xmit bulk operations here */
			info = calc_info(r, p, t);
			if (info > 0) {
				i_str = "bulk-average";
				info = (pps+drop) / info; /* calc avg bulk */
			}
			if (pps > 0 || drop > 0)
				printf(fmt1, "devmap-xmit",
				       i, pps, drop, info, i_str);
		}
		pps = calc_pps(&rec->total, &prev->total, t);
		drop = calc_drop(&rec->total, &prev->total, t);
		info = calc_info(&rec->total, &prev->total, t);
		if (info > 0) {
			i_str = "bulk-average";
			info = (pps+drop) / info; /* calc avg bulk */
		}
		printf(fmt2, "devmap-xmit", "total", pps, drop, info, i_str);
	}

	printf("\n");
}
451
Jesper Dangaard Brouer280b0582017-10-06 10:41:46 +0200452static bool stats_collect(struct stats_record *rec)
Jesper Dangaard Brouer3ffab542017-08-29 16:38:11 +0200453{
Jesper Dangaard Brouer280b0582017-10-06 10:41:46 +0200454 int fd;
Jesper Dangaard Brouer3ffab542017-08-29 16:38:11 +0200455 int i;
456
457 /* TODO: Detect if someone unloaded the perf event_fd's, as
458 * this can happen by someone running perf-record -e
459 */
460
Jesper Dangaard Brouer280b0582017-10-06 10:41:46 +0200461 fd = map_data[0].fd; /* map0: redirect_err_cnt */
Jesper Dangaard Brouer417f1d92018-01-19 17:15:50 +0100462 for (i = 0; i < REDIR_RES_MAX; i++)
463 map_collect_record_u64(fd, i, &rec->xdp_redirect[i]);
Jesper Dangaard Brouer280b0582017-10-06 10:41:46 +0200464
465 fd = map_data[1].fd; /* map1: exception_cnt */
466 for (i = 0; i < XDP_ACTION_MAX; i++) {
Jesper Dangaard Brouer417f1d92018-01-19 17:15:50 +0100467 map_collect_record_u64(fd, i, &rec->xdp_exception[i]);
Jesper Dangaard Brouer280b0582017-10-06 10:41:46 +0200468 }
469
Jesper Dangaard Brouer417f1d92018-01-19 17:15:50 +0100470 fd = map_data[2].fd; /* map2: cpumap_enqueue_cnt */
471 for (i = 0; i < MAX_CPUS; i++)
472 map_collect_record(fd, i, &rec->xdp_cpumap_enqueue[i]);
473
474 fd = map_data[3].fd; /* map3: cpumap_kthread_cnt */
475 map_collect_record(fd, 0, &rec->xdp_cpumap_kthread);
476
Jesper Dangaard Brouer9940fbf2018-05-24 16:46:02 +0200477 fd = map_data[4].fd; /* map4: devmap_xmit_cnt */
478 map_collect_record(fd, 0, &rec->xdp_devmap_xmit);
479
Jesper Dangaard Brouer3ffab542017-08-29 16:38:11 +0200480 return true;
481}
482
Jesper Dangaard Brouer417f1d92018-01-19 17:15:50 +0100483static void *alloc_rec_per_cpu(int record_size)
484{
485 unsigned int nr_cpus = bpf_num_possible_cpus();
486 void *array;
487 size_t size;
488
489 size = record_size * nr_cpus;
490 array = malloc(size);
491 memset(array, 0, size);
492 if (!array) {
493 fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
494 exit(EXIT_FAIL_MEM);
495 }
496 return array;
497}
498
499static struct stats_record *alloc_stats_record(void)
500{
501 struct stats_record *rec;
502 int rec_sz;
503 int i;
504
505 /* Alloc main stats_record structure */
506 rec = malloc(sizeof(*rec));
507 memset(rec, 0, sizeof(*rec));
508 if (!rec) {
509 fprintf(stderr, "Mem alloc error\n");
510 exit(EXIT_FAIL_MEM);
511 }
512
513 /* Alloc stats stored per CPU for each record */
514 rec_sz = sizeof(struct u64rec);
515 for (i = 0; i < REDIR_RES_MAX; i++)
516 rec->xdp_redirect[i].cpu = alloc_rec_per_cpu(rec_sz);
517
518 for (i = 0; i < XDP_ACTION_MAX; i++)
519 rec->xdp_exception[i].cpu = alloc_rec_per_cpu(rec_sz);
520
521 rec_sz = sizeof(struct datarec);
522 rec->xdp_cpumap_kthread.cpu = alloc_rec_per_cpu(rec_sz);
Jesper Dangaard Brouer9940fbf2018-05-24 16:46:02 +0200523 rec->xdp_devmap_xmit.cpu = alloc_rec_per_cpu(rec_sz);
Jesper Dangaard Brouer417f1d92018-01-19 17:15:50 +0100524
525 for (i = 0; i < MAX_CPUS; i++)
526 rec->xdp_cpumap_enqueue[i].cpu = alloc_rec_per_cpu(rec_sz);
527
528 return rec;
529}
530
531static void free_stats_record(struct stats_record *r)
532{
533 int i;
534
535 for (i = 0; i < REDIR_RES_MAX; i++)
536 free(r->xdp_redirect[i].cpu);
537
538 for (i = 0; i < XDP_ACTION_MAX; i++)
539 free(r->xdp_exception[i].cpu);
540
541 free(r->xdp_cpumap_kthread.cpu);
Jesper Dangaard Brouer9940fbf2018-05-24 16:46:02 +0200542 free(r->xdp_devmap_xmit.cpu);
Jesper Dangaard Brouer417f1d92018-01-19 17:15:50 +0100543
544 for (i = 0; i < MAX_CPUS; i++)
545 free(r->xdp_cpumap_enqueue[i].cpu);
546
547 free(r);
548}
549
/* Pointer swap trick: exchange the two stats_record pointers so the
 * previous snapshot buffer can be reused for the next collection.
 */
static inline void swap(struct stats_record **a, struct stats_record **b)
{
	struct stats_record *old_a = *a;

	*a = *b;
	*b = old_a;
}
559
/* Main reporting loop: collect a snapshot every 'interval' seconds and
 * print rates relative to the previous snapshot.  Never returns under
 * normal operation (terminated by signal/Ctrl-C).
 */
static void stats_poll(int interval, bool err_only)
{
	struct stats_record *rec, *prev;

	rec = alloc_stats_record();
	prev = alloc_stats_record();
	stats_collect(rec);

	if (err_only)
		printf("\n%s\n", __doc_err_only__);

	/* Trick to pretty printf with thousands separators use %' */
	setlocale(LC_NUMERIC, "en_US");

	/* Header */
	if (verbose)
		printf("\n%s", __doc__);

	/* TODO Need more advanced stats on error types */
	if (verbose) {
		printf(" - Stats map0: %s\n", map_data[0].name);
		printf(" - Stats map1: %s\n", map_data[1].name);
		printf("\n");
	}
	fflush(stdout);

	while (1) {
		/* Reuse the old snapshot buffer for the new collection */
		swap(&prev, &rec);
		stats_collect(rec);
		stats_print(rec, prev, err_only);
		fflush(stdout);
		sleep(interval);
	}

	/* NOTE(review): unreachable — the while(1) above never exits;
	 * kept for symmetry with alloc_stats_record().
	 */
	free_stats_record(rec);
	free_stats_record(prev);
}
597
Stephen Hemminger09295672017-10-01 14:07:34 -0700598static void print_bpf_prog_info(void)
Jesper Dangaard Brouer3ffab542017-08-29 16:38:11 +0200599{
600 int i;
601
602 /* Prog info */
603 printf("Loaded BPF prog have %d bpf program(s)\n", prog_cnt);
604 for (i = 0; i < prog_cnt; i++) {
605 printf(" - prog_fd[%d] = fd(%d)\n", i, prog_fd[i]);
606 }
607
608 /* Maps info */
609 printf("Loaded BPF prog have %d map(s)\n", map_data_count);
610 for (i = 0; i < map_data_count; i++) {
611 char *name = map_data[i].name;
612 int fd = map_data[i].fd;
613
614 printf(" - map_data[%d] = fd(%d) name:%s\n", i, fd, name);
615 }
616
617 /* Event info */
618 printf("Searching for (max:%d) event file descriptor(s)\n", prog_cnt);
619 for (i = 0; i < prog_cnt; i++) {
620 if (event_fd[i] != -1)
621 printf(" - event_fd[%d] = fd(%d)\n", i, event_fd[i]);
622 }
623}
624
/* Entry point: parse options, raise RLIMIT_MEMLOCK (needed for BPF map
 * allocation), load the companion <argv0>_kern.o object, optionally
 * disable the success-path tracepoints, then poll/print stats forever.
 */
int main(int argc, char **argv)
{
	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
	int longindex = 0, opt;
	int ret = EXIT_SUCCESS;
	char bpf_obj_file[256];

	/* Default settings: */
	bool errors_only = true;	/* --stats flips this off */
	int interval = 2;		/* seconds between reports (--sec) */

	/* BPF object is expected next to the binary: "<argv0>_kern.o" */
	snprintf(bpf_obj_file, sizeof(bpf_obj_file), "%s_kern.o", argv[0]);

	/* Parse commands line args */
	while ((opt = getopt_long(argc, argv, "hDSs:",
				  long_options, &longindex)) != -1) {
		switch (opt) {
		case 'D':
			debug = true;
			break;
		case 'S':
			errors_only = false;
			break;
		case 's':
			interval = atoi(optarg);
			break;
		case 'h':
		default:
			usage(argv);
			return EXIT_FAILURE;
		}
	}

	/* Lift memlock limit so BPF maps/progs can be allocated */
	if (setrlimit(RLIMIT_MEMLOCK, &r)) {
		perror("setrlimit(RLIMIT_MEMLOCK)");
		return EXIT_FAILURE;
	}

	if (load_bpf_file(bpf_obj_file)) {
		printf("ERROR - bpf_log_buf: %s", bpf_log_buf);
		return EXIT_FAILURE;
	}
	if (!prog_fd[0]) {
		printf("ERROR - load_bpf_file: %s\n", strerror(errno));
		return EXIT_FAILURE;
	}

	if (debug) {
		print_bpf_prog_info();
	}

	/* Unload/stop tracepoint event by closing fd's */
	if (errors_only) {
		/* The prog_fd[i] and event_fd[i] depend on the
		 * order the functions was defined in _kern.c
		 */
		close(event_fd[2]); /* tracepoint/xdp/xdp_redirect */
		close(prog_fd[2]); /* func: trace_xdp_redirect */
		close(event_fd[3]); /* tracepoint/xdp/xdp_redirect_map */
		close(prog_fd[3]); /* func: trace_xdp_redirect_map */
	}

	/* Never returns in normal operation */
	stats_poll(interval, errors_only);

	return ret;
}