// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2018 Intel Corporation. */

#include <asm/barrier.h>
#include <errno.h>
#include <getopt.h>
#include <libgen.h>
#include <linux/bpf.h>
#include <linux/compiler.h>
#include <linux/if_link.h>
#include <linux/if_xdp.h>
#include <linux/if_ether.h>
#include <locale.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <poll.h>
#include <pthread.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>

#include "libbpf.h"
#include "xsk.h"
#include <bpf/bpf.h>

#ifndef SOL_XDP
#define SOL_XDP 283
#endif

#ifndef AF_XDP
#define AF_XDP 44
#endif

#ifndef PF_XDP
#define PF_XDP AF_XDP
#endif

#define NUM_FRAMES (4 * 1024)
#define BATCH_SIZE 64

#define DEBUG_HEXDUMP 0
#define MAX_SOCKS 8

typedef __u64 u64;
typedef __u32 u32;

static unsigned long prev_time;

enum benchmark_type {
	BENCH_RXDROP = 0,
	BENCH_TXONLY = 1,
	BENCH_L2FWD = 2,
};

static enum benchmark_type opt_bench = BENCH_RXDROP;
static u32 opt_xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
static const char *opt_if = "";
static int opt_ifindex;
static int opt_queue;
static int opt_poll;
static int opt_interval = 1;
static u32 opt_xdp_bind_flags = XDP_USE_NEED_WAKEUP;
static int opt_xsk_frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
static int opt_timeout = 1000;
static bool opt_need_wakeup = true;
static __u32 prog_id;

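/*
 * Per-UMEM state: the shared packet buffer area plus the two rings used to
 * pass buffer ownership between kernel and user space -- the fill ring (fq)
 * hands buffers to the kernel for RX, the completion ring (cq) returns
 * buffers whose TX has finished.
 */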
struct xsk_umem_info {
	struct xsk_ring_prod fq;
	struct xsk_ring_cons cq;
	struct xsk_umem *umem;
	void *buffer;
};

struct xsk_socket_info {
	struct xsk_ring_cons rx;
	struct xsk_ring_prod tx;
	struct xsk_umem_info *umem;
	struct xsk_socket *xsk;
	unsigned long rx_npkts;
	unsigned long tx_npkts;
	unsigned long prev_rx_npkts;
	unsigned long prev_tx_npkts;
	u32 outstanding_tx;
};

static int num_socks;
struct xsk_socket_info *xsks[MAX_SOCKS];

static unsigned long get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000UL + ts.tv_nsec;
}

static void print_benchmark(bool running)
{
	const char *bench_str = "INVALID";

	if (opt_bench == BENCH_RXDROP)
		bench_str = "rxdrop";
	else if (opt_bench == BENCH_TXONLY)
		bench_str = "txonly";
	else if (opt_bench == BENCH_L2FWD)
		bench_str = "l2fwd";

	printf("%s:%d %s ", opt_if, opt_queue, bench_str);
	if (opt_xdp_flags & XDP_FLAGS_SKB_MODE)
		printf("xdp-skb ");
	else if (opt_xdp_flags & XDP_FLAGS_DRV_MODE)
		printf("xdp-drv ");
	else
		printf("	");

	if (opt_poll)
		printf("poll() ");

	if (running) {
		printf("running...");
		fflush(stdout);
	}
}

static void dump_stats(void)
{
	unsigned long now = get_nsecs();
	long dt = now - prev_time;
	int i;

	prev_time = now;

	for (i = 0; i < num_socks && xsks[i]; i++) {
		char *fmt = "%-15s %'-11.0f %'-11lu\n";
		double rx_pps, tx_pps;

		rx_pps = (xsks[i]->rx_npkts - xsks[i]->prev_rx_npkts) *
			 1000000000. / dt;
		tx_pps = (xsks[i]->tx_npkts - xsks[i]->prev_tx_npkts) *
			 1000000000. / dt;

		printf("\n sock%d@", i);
		print_benchmark(false);
		printf("\n");

		printf("%-15s %-11s %-11s %-11.2f\n", "", "pps", "pkts",
		       dt / 1000000000.);
		printf(fmt, "rx", rx_pps, xsks[i]->rx_npkts);
		printf(fmt, "tx", tx_pps, xsks[i]->tx_npkts);

		xsks[i]->prev_rx_npkts = xsks[i]->rx_npkts;
		xsks[i]->prev_tx_npkts = xsks[i]->tx_npkts;
	}
}

static void *poller(void *arg)
{
	(void)arg;
	for (;;) {
		sleep(opt_interval);
		dump_stats();
	}

	return NULL;
}

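/*
 * Detach the XDP program on exit, but only if the program currently attached
 * to the interface is still the one this process loaded (compared via
 * prog_id), so an unrelated program installed later is left alone.
 */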
static void remove_xdp_program(void)
{
	__u32 curr_prog_id = 0;

	if (bpf_get_link_xdp_id(opt_ifindex, &curr_prog_id, opt_xdp_flags)) {
		printf("bpf_get_link_xdp_id failed\n");
		exit(EXIT_FAILURE);
	}
	if (prog_id == curr_prog_id)
		bpf_set_link_xdp_fd(opt_ifindex, -1, opt_xdp_flags);
	else if (!curr_prog_id)
		printf("couldn't find a prog id on a given interface\n");
	else
		printf("program on interface changed, not removing\n");
}

static void int_exit(int sig)
{
	struct xsk_umem *umem = xsks[0]->umem->umem;

	(void)sig;

	dump_stats();
	xsk_socket__delete(xsks[0]->xsk);
	(void)xsk_umem__delete(umem);
	remove_xdp_program();

	exit(EXIT_SUCCESS);
}

static void __exit_with_error(int error, const char *file, const char *func,
			      int line)
{
	fprintf(stderr, "%s:%s:%i: errno: %d/\"%s\"\n", file, func,
		line, error, strerror(error));
	dump_stats();
	remove_xdp_program();
	exit(EXIT_FAILURE);
}

#define exit_with_error(error) __exit_with_error(error, __FILE__, __func__, \
						 __LINE__)

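/*
 * Canned 60-byte Ethernet/IPv4/UDP frame that the txonly benchmark copies
 * into every UMEM frame and transmits repeatedly.
 */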
static const char pkt_data[] =
	"\x3c\xfd\xfe\x9e\x7f\x71\xec\xb1\xd7\x98\x3a\xc0\x08\x00\x45\x00"
	"\x00\x2e\x00\x00\x00\x00\x40\x11\x88\x97\x05\x08\x07\x08\xc8\x14"
	"\x1e\x04\x10\x92\x10\x92\x00\x1a\x6d\xa3\x34\x33\x1f\x69\x40\x6b"
	"\x54\x59\xb6\x14\x2d\x11\x44\xbf\xaf\xd9\xbe\xaa";

static void swap_mac_addresses(void *data)
{
	struct ether_header *eth = (struct ether_header *)data;
	struct ether_addr *src_addr = (struct ether_addr *)&eth->ether_shost;
	struct ether_addr *dst_addr = (struct ether_addr *)&eth->ether_dhost;
	struct ether_addr tmp;

	tmp = *src_addr;
	*src_addr = *dst_addr;
	*dst_addr = tmp;
}

static void hex_dump(void *pkt, size_t length, u64 addr)
{
	const unsigned char *address = (unsigned char *)pkt;
	const unsigned char *line = address;
	size_t line_size = 32;
	unsigned char c;
	char buf[32];
	int i = 0;

	if (!DEBUG_HEXDUMP)
		return;

	sprintf(buf, "addr=%llu", addr);
	printf("length = %zu\n", length);
	printf("%s | ", buf);
	while (length-- > 0) {
		printf("%02X ", *address++);
		if (!(++i % line_size) || (length == 0 && i % line_size)) {
			if (length == 0) {
				while (i++ % line_size)
					printf("__ ");
			}
			printf(" | ");	/* right close */
			while (line < address) {
				c = *line++;
				printf("%c", (c < 33 || c == 255) ? 0x2E : c);
			}
			printf("\n");
			if (length > 0)
				printf("%s | ", buf);
		}
	}
	printf("\n");
}

static size_t gen_eth_frame(struct xsk_umem_info *umem, u64 addr)
{
	memcpy(xsk_umem__get_data(umem->buffer, addr), pkt_data,
	       sizeof(pkt_data) - 1);
	return sizeof(pkt_data) - 1;
}

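/*
 * Register the user-space buffer area as an AF_XDP UMEM. Ring sizes and
 * headroom are left at the libbpf defaults; only the frame size follows the
 * --frame-size option.
 */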
static struct xsk_umem_info *xsk_configure_umem(void *buffer, u64 size)
{
	struct xsk_umem_info *umem;
	struct xsk_umem_config cfg = {
		.fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
		.comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.frame_size = opt_xsk_frame_size,
		.frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM,
	};
	int ret;

	umem = calloc(1, sizeof(*umem));
	if (!umem)
		exit_with_error(errno);

	ret = xsk_umem__create(&umem->umem, buffer, size, &umem->fq, &umem->cq,
			       &cfg);
	if (ret)
		exit_with_error(-ret);

	umem->buffer = buffer;
	return umem;
}

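/*
 * Create one AF_XDP socket bound to opt_if/opt_queue on top of the UMEM,
 * remember the id of the XDP program libbpf attached, and pre-populate the
 * fill ring with one buffer per default-sized ring slot so RX can start
 * immediately.
 */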
static struct xsk_socket_info *xsk_configure_socket(struct xsk_umem_info *umem)
{
	struct xsk_socket_config cfg;
	struct xsk_socket_info *xsk;
	int ret;
	u32 idx;
	int i;

	xsk = calloc(1, sizeof(*xsk));
	if (!xsk)
		exit_with_error(errno);

	xsk->umem = umem;
	cfg.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
	cfg.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
	cfg.libbpf_flags = 0;
	cfg.xdp_flags = opt_xdp_flags;
	cfg.bind_flags = opt_xdp_bind_flags;
	ret = xsk_socket__create(&xsk->xsk, opt_if, opt_queue, umem->umem,
				 &xsk->rx, &xsk->tx, &cfg);
	if (ret)
		exit_with_error(-ret);

	ret = bpf_get_link_xdp_id(opt_ifindex, &prog_id, opt_xdp_flags);
	if (ret)
		exit_with_error(-ret);

	ret = xsk_ring_prod__reserve(&xsk->umem->fq,
				     XSK_RING_PROD__DEFAULT_NUM_DESCS,
				     &idx);
	if (ret != XSK_RING_PROD__DEFAULT_NUM_DESCS)
		exit_with_error(-ret);
	for (i = 0; i < XSK_RING_PROD__DEFAULT_NUM_DESCS; i++)
		*xsk_ring_prod__fill_addr(&xsk->umem->fq, idx++) =
			i * opt_xsk_frame_size;
	xsk_ring_prod__submit(&xsk->umem->fq,
			      XSK_RING_PROD__DEFAULT_NUM_DESCS);

	return xsk;
}

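/*
 * Example invocations (the sample is typically built as "xdpsock" in
 * samples/bpf; the interface name and queue id below are placeholders):
 *
 *   xdpsock -i eth0 -q 0 -r	rxdrop, default XDP attach mode
 *   xdpsock -i eth0 -l -N -z	l2fwd in native driver mode with zero-copy
 */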
static struct option long_options[] = {
	{"rxdrop", no_argument, 0, 'r'},
	{"txonly", no_argument, 0, 't'},
	{"l2fwd", no_argument, 0, 'l'},
	{"interface", required_argument, 0, 'i'},
	{"queue", required_argument, 0, 'q'},
	{"poll", no_argument, 0, 'p'},
	{"xdp-skb", no_argument, 0, 'S'},
	{"xdp-native", no_argument, 0, 'N'},
	{"interval", required_argument, 0, 'n'},
	{"zero-copy", no_argument, 0, 'z'},
	{"copy", no_argument, 0, 'c'},
	{"frame-size", required_argument, 0, 'f'},
	{"no-need-wakeup", no_argument, 0, 'm'},
	{0, 0, 0, 0}
};

static void usage(const char *prog)
{
	const char *str =
		"  Usage: %s [OPTIONS]\n"
		"  Options:\n"
		"  -r, --rxdrop		Discard all incoming packets (default)\n"
		"  -t, --txonly		Only send packets\n"
		"  -l, --l2fwd		MAC swap L2 forwarding\n"
		"  -i, --interface=n	Run on interface n\n"
		"  -q, --queue=n	Use queue n (default 0)\n"
		"  -p, --poll		Use poll syscall\n"
		"  -S, --xdp-skb=n	Use XDP skb-mod\n"
		"  -N, --xdp-native=n	Enforce XDP native mode\n"
		"  -n, --interval=n	Specify statistics update interval (default 1 sec).\n"
		"  -z, --zero-copy      Force zero-copy mode.\n"
		"  -c, --copy           Force copy mode.\n"
		"  -f, --frame-size=n   Set the frame size (must be a power of two, default is %d).\n"
		"  -m, --no-need-wakeup Turn off use of driver need wakeup flag.\n"
		"\n";
	fprintf(stderr, str, prog, XSK_UMEM__DEFAULT_FRAME_SIZE);
	exit(EXIT_FAILURE);
}

static void parse_command_line(int argc, char **argv)
{
	int option_index, c;

	opterr = 0;

	for (;;) {
		c = getopt_long(argc, argv, "Frtli:q:psSNn:czf:m",
				long_options, &option_index);
		if (c == -1)
			break;

		switch (c) {
		case 'r':
			opt_bench = BENCH_RXDROP;
			break;
		case 't':
			opt_bench = BENCH_TXONLY;
			break;
		case 'l':
			opt_bench = BENCH_L2FWD;
			break;
		case 'i':
			opt_if = optarg;
			break;
		case 'q':
			opt_queue = atoi(optarg);
			break;
		case 'p':
			opt_poll = 1;
			break;
		case 'S':
			opt_xdp_flags |= XDP_FLAGS_SKB_MODE;
			opt_xdp_bind_flags |= XDP_COPY;
			break;
		case 'N':
			opt_xdp_flags |= XDP_FLAGS_DRV_MODE;
			break;
		case 'n':
			opt_interval = atoi(optarg);
			break;
		case 'z':
			opt_xdp_bind_flags |= XDP_ZEROCOPY;
			break;
		case 'c':
			opt_xdp_bind_flags |= XDP_COPY;
			break;
		case 'F':
			opt_xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST;
			break;
		case 'f':
			opt_xsk_frame_size = atoi(optarg);
			break;
		case 'm':
			opt_need_wakeup = false;
			opt_xdp_bind_flags &= ~XDP_USE_NEED_WAKEUP;
			break;
		default:
			usage(basename(argv[0]));
		}
	}

	opt_ifindex = if_nametoindex(opt_if);
	if (!opt_ifindex) {
		fprintf(stderr, "ERROR: interface \"%s\" does not exist\n",
			opt_if);
		usage(basename(argv[0]));
	}

	if (opt_xsk_frame_size & (opt_xsk_frame_size - 1)) {
		fprintf(stderr, "--frame-size=%d is not a power of two\n",
			opt_xsk_frame_size);
		usage(basename(argv[0]));
	}
}

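/*
 * Kick the kernel to start transmitting: a zero-length sendto() on the
 * AF_XDP socket triggers TX processing when the driver asks for a wakeup
 * (or unconditionally when need_wakeup is disabled).
 */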
static void kick_tx(struct xsk_socket_info *xsk)
{
	int ret;

	ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
	if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN || errno == EBUSY)
		return;
	exit_with_error(errno);
}

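/*
 * Reap up to BATCH_SIZE completed TX descriptors from the completion ring
 * and recycle the same buffer addresses straight back into the fill ring,
 * so the l2fwd path never runs out of RX buffers.
 */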
static inline void complete_tx_l2fwd(struct xsk_socket_info *xsk,
				     struct pollfd *fds)
{
	u32 idx_cq = 0, idx_fq = 0;
	unsigned int rcvd;
	size_t ndescs;

	if (!xsk->outstanding_tx)
		return;

	if (!opt_need_wakeup || xsk_ring_prod__needs_wakeup(&xsk->tx))
		kick_tx(xsk);

	ndescs = (xsk->outstanding_tx > BATCH_SIZE) ? BATCH_SIZE :
		xsk->outstanding_tx;

	/* re-add completed Tx buffers */
	rcvd = xsk_ring_cons__peek(&xsk->umem->cq, ndescs, &idx_cq);
	if (rcvd > 0) {
		unsigned int i;
		int ret;

		ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
		while (ret != rcvd) {
			if (ret < 0)
				exit_with_error(-ret);
			if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq))
				ret = poll(fds, num_socks, opt_timeout);
			ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd,
						     &idx_fq);
		}
		for (i = 0; i < rcvd; i++)
			*xsk_ring_prod__fill_addr(&xsk->umem->fq, idx_fq++) =
				*xsk_ring_cons__comp_addr(&xsk->umem->cq,
							  idx_cq++);

		xsk_ring_prod__submit(&xsk->umem->fq, rcvd);
		xsk_ring_cons__release(&xsk->umem->cq, rcvd);
		xsk->outstanding_tx -= rcvd;
		xsk->tx_npkts += rcvd;
	}
}

static inline void complete_tx_only(struct xsk_socket_info *xsk)
{
	unsigned int rcvd;
	u32 idx;

	if (!xsk->outstanding_tx)
		return;

	if (!opt_need_wakeup || xsk_ring_prod__needs_wakeup(&xsk->tx))
		kick_tx(xsk);

	rcvd = xsk_ring_cons__peek(&xsk->umem->cq, BATCH_SIZE, &idx);
	if (rcvd > 0) {
		xsk_ring_cons__release(&xsk->umem->cq, rcvd);
		xsk->outstanding_tx -= rcvd;
		xsk->tx_npkts += rcvd;
	}
}

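/*
 * rxdrop datapath: take a batch of received descriptors, optionally hex dump
 * them, and return the buffers to the fill ring without otherwise touching
 * the payload.
 */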
static void rx_drop(struct xsk_socket_info *xsk, struct pollfd *fds)
{
	unsigned int rcvd, i;
	u32 idx_rx = 0, idx_fq = 0;
	int ret;

	rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx);
	if (!rcvd) {
		if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq))
			ret = poll(fds, num_socks, opt_timeout);
		return;
	}

	ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
	while (ret != rcvd) {
		if (ret < 0)
			exit_with_error(-ret);
		if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq))
			ret = poll(fds, num_socks, opt_timeout);
		ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
	}

	for (i = 0; i < rcvd; i++) {
		u64 addr = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx)->addr;
		u32 len = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++)->len;
		char *pkt = xsk_umem__get_data(xsk->umem->buffer, addr);

		hex_dump(pkt, len, addr);
		*xsk_ring_prod__fill_addr(&xsk->umem->fq, idx_fq++) = addr;
	}

	xsk_ring_prod__submit(&xsk->umem->fq, rcvd);
	xsk_ring_cons__release(&xsk->rx, rcvd);
	xsk->rx_npkts += rcvd;
}

static void rx_drop_all(void)
{
	struct pollfd fds[MAX_SOCKS + 1];
	int i, ret;

	memset(fds, 0, sizeof(fds));

	for (i = 0; i < num_socks; i++) {
		fds[i].fd = xsk_socket__fd(xsks[i]->xsk);
		fds[i].events = POLLIN;
	}

	for (;;) {
		if (opt_poll) {
			ret = poll(fds, num_socks, opt_timeout);
			if (ret <= 0)
				continue;
		}

		for (i = 0; i < num_socks; i++)
			rx_drop(xsks[i], fds);
	}
}

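/*
 * txonly datapath: reserve a full batch of TX descriptors pointing at the
 * pre-generated frames and submit them, then reap completions.
 */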
static void tx_only(struct xsk_socket_info *xsk, u32 frame_nb)
{
	u32 idx;

	if (xsk_ring_prod__reserve(&xsk->tx, BATCH_SIZE, &idx) == BATCH_SIZE) {
		unsigned int i;

		for (i = 0; i < BATCH_SIZE; i++) {
			xsk_ring_prod__tx_desc(&xsk->tx, idx + i)->addr =
				(frame_nb + i) << XSK_UMEM__DEFAULT_FRAME_SHIFT;
			xsk_ring_prod__tx_desc(&xsk->tx, idx + i)->len =
				sizeof(pkt_data) - 1;
		}

		xsk_ring_prod__submit(&xsk->tx, BATCH_SIZE);
		xsk->outstanding_tx += BATCH_SIZE;
		frame_nb += BATCH_SIZE;
		frame_nb %= NUM_FRAMES;
	}

	complete_tx_only(xsk);
}

static void tx_only_all(void)
{
	struct pollfd fds[MAX_SOCKS];
	u32 frame_nb[MAX_SOCKS] = {};
	int i, ret;

	memset(fds, 0, sizeof(fds));
	for (i = 0; i < num_socks; i++) {
		fds[i].fd = xsk_socket__fd(xsks[i]->xsk);
		fds[i].events = POLLOUT;
	}

	for (;;) {
		if (opt_poll) {
			ret = poll(fds, num_socks, opt_timeout);
			if (ret <= 0)
				continue;

			if (!(fds[0].revents & POLLOUT))
				continue;
		}

		for (i = 0; i < num_socks; i++)
			tx_only(xsks[i], frame_nb[i]);
	}
}

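/*
 * l2fwd datapath: for every received frame, swap the Ethernet source and
 * destination MAC addresses and retransmit the very same UMEM buffer on the
 * TX ring; the buffer is recycled to the fill ring once its TX completion
 * shows up in complete_tx_l2fwd().
 */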
static void l2fwd(struct xsk_socket_info *xsk, struct pollfd *fds)
{
	unsigned int rcvd, i;
	u32 idx_rx = 0, idx_tx = 0;
	int ret;

	complete_tx_l2fwd(xsk, fds);

	rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx);
	if (!rcvd) {
		if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq))
			ret = poll(fds, num_socks, opt_timeout);
		return;
	}

	ret = xsk_ring_prod__reserve(&xsk->tx, rcvd, &idx_tx);
	while (ret != rcvd) {
		if (ret < 0)
			exit_with_error(-ret);
		if (xsk_ring_prod__needs_wakeup(&xsk->tx))
			kick_tx(xsk);
		ret = xsk_ring_prod__reserve(&xsk->tx, rcvd, &idx_tx);
	}

	for (i = 0; i < rcvd; i++) {
		u64 addr = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx)->addr;
		u32 len = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++)->len;
		char *pkt = xsk_umem__get_data(xsk->umem->buffer, addr);

		swap_mac_addresses(pkt);

		hex_dump(pkt, len, addr);
		xsk_ring_prod__tx_desc(&xsk->tx, idx_tx)->addr = addr;
		xsk_ring_prod__tx_desc(&xsk->tx, idx_tx++)->len = len;
	}

	xsk_ring_prod__submit(&xsk->tx, rcvd);
	xsk_ring_cons__release(&xsk->rx, rcvd);

	xsk->rx_npkts += rcvd;
	xsk->outstanding_tx += rcvd;
}

static void l2fwd_all(void)
{
	struct pollfd fds[MAX_SOCKS];
	int i, ret;

	memset(fds, 0, sizeof(fds));

	for (i = 0; i < num_socks; i++) {
		fds[i].fd = xsk_socket__fd(xsks[i]->xsk);
		fds[i].events = POLLOUT | POLLIN;
	}

	for (;;) {
		if (opt_poll) {
			ret = poll(fds, num_socks, opt_timeout);
			if (ret <= 0)
				continue;
		}

		for (i = 0; i < num_socks; i++)
			l2fwd(xsks[i], fds);
	}
}

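/*
 * Setup order in main(): parse options, lift RLIMIT_MEMLOCK, allocate a
 * page-aligned buffer for NUM_FRAMES frames, register it as a UMEM, create
 * the AF_XDP socket, optionally pre-fill the frames for txonly, install
 * signal handlers, start the statistics thread, then enter the chosen
 * benchmark loop (which only returns via a signal handler).
 */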
int main(int argc, char **argv)
{
	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
	struct xsk_umem_info *umem;
	pthread_t pt;
	void *bufs;
	int ret;

	parse_command_line(argc, argv);

	if (setrlimit(RLIMIT_MEMLOCK, &r)) {
		fprintf(stderr, "ERROR: setrlimit(RLIMIT_MEMLOCK) \"%s\"\n",
			strerror(errno));
		exit(EXIT_FAILURE);
	}

	ret = posix_memalign(&bufs, getpagesize(), /* PAGE_SIZE aligned */
			     NUM_FRAMES * opt_xsk_frame_size);
	if (ret)
		exit_with_error(ret);

	/* Create sockets... */
	umem = xsk_configure_umem(bufs, NUM_FRAMES * opt_xsk_frame_size);
	xsks[num_socks++] = xsk_configure_socket(umem);

	if (opt_bench == BENCH_TXONLY) {
		int i;

		for (i = 0; i < NUM_FRAMES; i++)
			(void)gen_eth_frame(umem, i * opt_xsk_frame_size);
	}

	signal(SIGINT, int_exit);
	signal(SIGTERM, int_exit);
	signal(SIGABRT, int_exit);

	setlocale(LC_ALL, "");

	ret = pthread_create(&pt, NULL, poller, NULL);
	if (ret)
		exit_with_error(ret);

	prev_time = get_nsecs();

	if (opt_bench == BENCH_RXDROP)
		rx_drop_all();
	else if (opt_bench == BENCH_TXONLY)
		tx_only_all();
	else
		l2fwd_all();

	return 0;
}