// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2018 Intel Corporation. */

#include <asm/barrier.h>
#include <errno.h>
#include <getopt.h>
#include <libgen.h>
#include <linux/bpf.h>
#include <linux/compiler.h>
#include <linux/if_link.h>
#include <linux/if_xdp.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <arpa/inet.h>
#include <locale.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <poll.h>
#include <pthread.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>

#include <bpf/libbpf.h>
#include <bpf/xsk.h>
#include <bpf/bpf.h>
#include "xdpsock.h"

#ifndef SOL_XDP
#define SOL_XDP 283
#endif

#ifndef AF_XDP
#define AF_XDP 44
#endif

#ifndef PF_XDP
#define PF_XDP AF_XDP
#endif

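/* The #ifndef fallbacks above let this sample build against libc headers
 * that predate AF_XDP (merged in kernel 4.18); the values mirror the
 * kernel's uapi definitions.
 */
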
#define NUM_FRAMES (4 * 1024)
#define MIN_PKT_SIZE 64

#define DEBUG_HEXDUMP 0

typedef __u64 u64;
typedef __u32 u32;
typedef __u16 u16;
typedef __u8 u8;

static unsigned long prev_time;

enum benchmark_type {
	BENCH_RXDROP = 0,
	BENCH_TXONLY = 1,
	BENCH_L2FWD = 2,
};

static enum benchmark_type opt_bench = BENCH_RXDROP;
static u32 opt_xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
static const char *opt_if = "";
static int opt_ifindex;
static int opt_queue;
static unsigned long opt_duration;
static unsigned long start_time;
static bool benchmark_done;
static u32 opt_batch_size = 64;
static int opt_pkt_count;
static u16 opt_pkt_size = MIN_PKT_SIZE;
static u32 opt_pkt_fill_pattern = 0x12345678;
static bool opt_extra_stats;
static bool opt_quiet;
static int opt_poll;
static int opt_interval = 1;
static u32 opt_xdp_bind_flags = XDP_USE_NEED_WAKEUP;
static u32 opt_umem_flags;
static int opt_unaligned_chunks;
static int opt_mmap_flags;
static int opt_xsk_frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
static int opt_timeout = 1000;
static bool opt_need_wakeup = true;
static u32 opt_num_xsks = 1;
static u32 prog_id;

struct xsk_ring_stats {
	unsigned long rx_npkts;
	unsigned long tx_npkts;
	unsigned long rx_dropped_npkts;
	unsigned long rx_invalid_npkts;
	unsigned long tx_invalid_npkts;
	unsigned long rx_full_npkts;
	unsigned long rx_fill_empty_npkts;
	unsigned long tx_empty_npkts;
	unsigned long prev_rx_npkts;
	unsigned long prev_tx_npkts;
	unsigned long prev_rx_dropped_npkts;
	unsigned long prev_rx_invalid_npkts;
	unsigned long prev_tx_invalid_npkts;
	unsigned long prev_rx_full_npkts;
	unsigned long prev_rx_fill_empty_npkts;
	unsigned long prev_tx_empty_npkts;
};

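/* A UMEM is a shared packet-buffer area carved into equally sized frames.
 * Each UMEM owns a fill ring (user space hands empty frames to the kernel
 * for RX) and a completion ring (the kernel hands back frames whose TX has
 * finished); each socket additionally owns an RX and a TX descriptor ring.
 * This is the standard AF_XDP layout described in
 * Documentation/networking/af_xdp.rst.
 */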
struct xsk_umem_info {
	struct xsk_ring_prod fq;
	struct xsk_ring_cons cq;
	struct xsk_umem *umem;
	void *buffer;
};

struct xsk_socket_info {
	struct xsk_ring_cons rx;
	struct xsk_ring_prod tx;
	struct xsk_umem_info *umem;
	struct xsk_socket *xsk;
	struct xsk_ring_stats ring_stats;
	u32 outstanding_tx;
};

static int num_socks;
struct xsk_socket_info *xsks[MAX_SOCKS];

static unsigned long get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000UL + ts.tv_nsec;
}

static void print_benchmark(bool running)
{
	const char *bench_str = "INVALID";

	if (opt_bench == BENCH_RXDROP)
		bench_str = "rxdrop";
	else if (opt_bench == BENCH_TXONLY)
		bench_str = "txonly";
	else if (opt_bench == BENCH_L2FWD)
		bench_str = "l2fwd";

	printf("%s:%d %s ", opt_if, opt_queue, bench_str);
	if (opt_xdp_flags & XDP_FLAGS_SKB_MODE)
		printf("xdp-skb ");
	else if (opt_xdp_flags & XDP_FLAGS_DRV_MODE)
		printf("xdp-drv ");
	else
		printf("	");

	if (opt_poll)
		printf("poll() ");

	if (running) {
		printf("running...");
		fflush(stdout);
	}
}

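/* Pull the kernel-side drop/error counters for one socket via the
 * XDP_STATISTICS getsockopt. The optlen check below guards against running
 * on an older kernel that returns a shorter struct xdp_statistics.
 */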
static int xsk_get_xdp_stats(int fd, struct xsk_socket_info *xsk)
{
	struct xdp_statistics stats;
	socklen_t optlen;
	int err;

	optlen = sizeof(stats);
	err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen);
	if (err)
		return err;

	if (optlen == sizeof(struct xdp_statistics)) {
		xsk->ring_stats.rx_dropped_npkts = stats.rx_dropped;
		xsk->ring_stats.rx_invalid_npkts = stats.rx_invalid_descs;
		xsk->ring_stats.tx_invalid_npkts = stats.tx_invalid_descs;
		xsk->ring_stats.rx_full_npkts = stats.rx_ring_full;
		xsk->ring_stats.rx_fill_empty_npkts = stats.rx_fill_ring_empty_descs;
		xsk->ring_stats.tx_empty_npkts = stats.tx_ring_empty_descs;
		return 0;
	}

	return -EINVAL;
}

static void dump_stats(void)
{
	unsigned long now = get_nsecs();
	long dt = now - prev_time;
	int i;

	prev_time = now;

	for (i = 0; i < num_socks && xsks[i]; i++) {
		char *fmt = "%-15s %'-11.0f %'-11lu\n";
		double rx_pps, tx_pps, dropped_pps, rx_invalid_pps, full_pps, fill_empty_pps,
			tx_invalid_pps, tx_empty_pps;

		rx_pps = (xsks[i]->ring_stats.rx_npkts - xsks[i]->ring_stats.prev_rx_npkts) *
			 1000000000. / dt;
		tx_pps = (xsks[i]->ring_stats.tx_npkts - xsks[i]->ring_stats.prev_tx_npkts) *
			 1000000000. / dt;

		printf("\n sock%d@", i);
		print_benchmark(false);
		printf("\n");

		printf("%-15s %-11s %-11s %-11.2f\n", "", "pps", "pkts",
		       dt / 1000000000.);
		printf(fmt, "rx", rx_pps, xsks[i]->ring_stats.rx_npkts);
		printf(fmt, "tx", tx_pps, xsks[i]->ring_stats.tx_npkts);

		xsks[i]->ring_stats.prev_rx_npkts = xsks[i]->ring_stats.rx_npkts;
		xsks[i]->ring_stats.prev_tx_npkts = xsks[i]->ring_stats.tx_npkts;

		if (opt_extra_stats) {
			if (!xsk_get_xdp_stats(xsk_socket__fd(xsks[i]->xsk), xsks[i])) {
				dropped_pps = (xsks[i]->ring_stats.rx_dropped_npkts -
						xsks[i]->ring_stats.prev_rx_dropped_npkts) *
							1000000000. / dt;
				rx_invalid_pps = (xsks[i]->ring_stats.rx_invalid_npkts -
						xsks[i]->ring_stats.prev_rx_invalid_npkts) *
							1000000000. / dt;
				tx_invalid_pps = (xsks[i]->ring_stats.tx_invalid_npkts -
						xsks[i]->ring_stats.prev_tx_invalid_npkts) *
							1000000000. / dt;
				full_pps = (xsks[i]->ring_stats.rx_full_npkts -
						xsks[i]->ring_stats.prev_rx_full_npkts) *
							1000000000. / dt;
				fill_empty_pps = (xsks[i]->ring_stats.rx_fill_empty_npkts -
						xsks[i]->ring_stats.prev_rx_fill_empty_npkts) *
							1000000000. / dt;
				tx_empty_pps = (xsks[i]->ring_stats.tx_empty_npkts -
						xsks[i]->ring_stats.prev_tx_empty_npkts) *
							1000000000. / dt;

				printf(fmt, "rx dropped", dropped_pps,
				       xsks[i]->ring_stats.rx_dropped_npkts);
				printf(fmt, "rx invalid", rx_invalid_pps,
				       xsks[i]->ring_stats.rx_invalid_npkts);
				printf(fmt, "tx invalid", tx_invalid_pps,
				       xsks[i]->ring_stats.tx_invalid_npkts);
				printf(fmt, "rx queue full", full_pps,
				       xsks[i]->ring_stats.rx_full_npkts);
				printf(fmt, "fill ring empty", fill_empty_pps,
				       xsks[i]->ring_stats.rx_fill_empty_npkts);
				printf(fmt, "tx ring empty", tx_empty_pps,
				       xsks[i]->ring_stats.tx_empty_npkts);

				xsks[i]->ring_stats.prev_rx_dropped_npkts =
					xsks[i]->ring_stats.rx_dropped_npkts;
				xsks[i]->ring_stats.prev_rx_invalid_npkts =
					xsks[i]->ring_stats.rx_invalid_npkts;
				xsks[i]->ring_stats.prev_tx_invalid_npkts =
					xsks[i]->ring_stats.tx_invalid_npkts;
				xsks[i]->ring_stats.prev_rx_full_npkts =
					xsks[i]->ring_stats.rx_full_npkts;
				xsks[i]->ring_stats.prev_rx_fill_empty_npkts =
					xsks[i]->ring_stats.rx_fill_empty_npkts;
				xsks[i]->ring_stats.prev_tx_empty_npkts =
					xsks[i]->ring_stats.tx_empty_npkts;
			} else {
				printf("%-15s\n", "Error retrieving extra stats");
			}
		}
	}
}

static bool is_benchmark_done(void)
{
	if (opt_duration > 0) {
		unsigned long dt = (get_nsecs() - start_time);

		if (dt >= opt_duration)
			benchmark_done = true;
	}
	return benchmark_done;
}

static void *poller(void *arg)
{
	(void)arg;
	while (!is_benchmark_done()) {
		sleep(opt_interval);
		dump_stats();
	}

	return NULL;
}

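/* Detach our XDP program from the interface, but only if the program that
 * is currently attached is still the one we loaded (prog_id match). This
 * avoids tearing down a program some other tool attached after us.
 */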
static void remove_xdp_program(void)
{
	u32 curr_prog_id = 0;

	if (bpf_get_link_xdp_id(opt_ifindex, &curr_prog_id, opt_xdp_flags)) {
		printf("bpf_get_link_xdp_id failed\n");
		exit(EXIT_FAILURE);
	}
	if (prog_id == curr_prog_id)
		bpf_set_link_xdp_fd(opt_ifindex, -1, opt_xdp_flags);
	else if (!curr_prog_id)
		printf("couldn't find a prog id on the given interface\n");
	else
		printf("program on interface changed, not removing\n");
}

static void int_exit(int sig)
{
	benchmark_done = true;
}

static void xdpsock_cleanup(void)
{
	struct xsk_umem *umem = xsks[0]->umem->umem;
	int i;

	dump_stats();
	for (i = 0; i < num_socks; i++)
		xsk_socket__delete(xsks[i]->xsk);
	(void)xsk_umem__delete(umem);
	remove_xdp_program();
}

static void __exit_with_error(int error, const char *file, const char *func,
			      int line)
{
	fprintf(stderr, "%s:%s:%i: errno: %d/\"%s\"\n", file, func,
		line, error, strerror(error));
	dump_stats();
	remove_xdp_program();
	exit(EXIT_FAILURE);
}

#define exit_with_error(error) __exit_with_error(error, __FILE__, __func__, \
						 __LINE__)
static void swap_mac_addresses(void *data)
{
	struct ether_header *eth = (struct ether_header *)data;
	struct ether_addr *src_addr = (struct ether_addr *)&eth->ether_shost;
	struct ether_addr *dst_addr = (struct ether_addr *)&eth->ether_dhost;
	struct ether_addr tmp;

	tmp = *src_addr;
	*src_addr = *dst_addr;
	*dst_addr = tmp;
}

static void hex_dump(void *pkt, size_t length, u64 addr)
{
	const unsigned char *address = (unsigned char *)pkt;
	const unsigned char *line = address;
	size_t line_size = 32;
	unsigned char c;
	char buf[32];
	int i = 0;

	if (!DEBUG_HEXDUMP)
		return;

	sprintf(buf, "addr=%llu", addr);
	printf("length = %zu\n", length);
	printf("%s | ", buf);
	while (length-- > 0) {
		printf("%02X ", *address++);
		if (!(++i % line_size) || (length == 0 && i % line_size)) {
			if (length == 0) {
				while (i++ % line_size)
					printf("__ ");
			}
			printf(" | ");	/* right close */
			while (line < address) {
				c = *line++;
				printf("%c", (c < 33 || c == 255) ? 0x2E : c);
			}
			printf("\n");
			if (length > 0)
				printf("%s | ", buf);
		}
	}
	printf("\n");
}

static void *memset32_htonl(void *dest, u32 val, u32 size)
{
	u32 *ptr = (u32 *)dest;
	int i;

	val = htonl(val);

	for (i = 0; i < (size & (~0x3)); i += 4)
		ptr[i >> 2] = val;

	for (; i < size; i++)
		((char *)dest)[i] = ((char *)&val)[i & 3];

	return dest;
}

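/* The helpers below implement the 16-bit one's-complement Internet checksum
 * (RFC 1071) used by the IP and UDP headers. The core trick is "folding":
 * 16-bit words are summed in a wider accumulator and any carries are added
 * back into the low bits, e.g. 0x00012345 folds to 0x2345 + 0x1 = 0x2346,
 * and the final checksum is the bitwise complement of that fold.
 */
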
/*
 * This function code has been taken from
 * Linux kernel lib/checksum.c
 */
static inline unsigned short from32to16(unsigned int x)
{
	/* add up 16-bit and 16-bit for 16+c bit */
	x = (x & 0xffff) + (x >> 16);
	/* add up carry.. */
	x = (x & 0xffff) + (x >> 16);
	return x;
}

/*
 * This function code has been taken from
 * Linux kernel lib/checksum.c
 */
static unsigned int do_csum(const unsigned char *buff, int len)
{
	unsigned int result = 0;
	int odd;

	if (len <= 0)
		goto out;
	odd = 1 & (unsigned long)buff;
	if (odd) {
#ifdef __LITTLE_ENDIAN
		result += (*buff << 8);
#else
		result = *buff;
#endif
		len--;
		buff++;
	}
	if (len >= 2) {
		if (2 & (unsigned long)buff) {
			result += *(unsigned short *)buff;
			len -= 2;
			buff += 2;
		}
		if (len >= 4) {
			const unsigned char *end = buff +
						   ((unsigned int)len & ~3);
			unsigned int carry = 0;

			do {
				unsigned int w = *(unsigned int *)buff;

				buff += 4;
				result += carry;
				result += w;
				carry = (w > result);
			} while (buff < end);
			result += carry;
			result = (result & 0xffff) + (result >> 16);
		}
		if (len & 2) {
			result += *(unsigned short *)buff;
			buff += 2;
		}
	}
	if (len & 1)
#ifdef __LITTLE_ENDIAN
		result += *buff;
#else
		result += (*buff << 8);
#endif
	result = from32to16(result);
	if (odd)
		result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
out:
	return result;
}

__sum16 ip_fast_csum(const void *iph, unsigned int ihl);

/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksum on 4 octet boundaries.
 * This function code has been taken from
 * Linux kernel lib/checksum.c
 */
__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	return (__force __sum16)~do_csum(iph, ihl * 4);
}

/*
 * Fold a partial checksum
 * This function code has been taken from
 * Linux kernel include/asm-generic/checksum.h
 */
static inline __sum16 csum_fold(__wsum csum)
{
	u32 sum = (__force u32)csum;

	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (__force __sum16)~sum;
}

/*
 * This function code has been taken from
 * Linux kernel lib/checksum.c
 */
static inline u32 from64to32(u64 x)
{
	/* add up 32-bit and 32-bit for 32+c bit */
	x = (x & 0xffffffff) + (x >> 32);
	/* add up carry.. */
	x = (x & 0xffffffff) + (x >> 32);
	return (u32)x;
}

__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
			  __u32 len, __u8 proto, __wsum sum);

/*
 * This function code has been taken from
 * Linux kernel lib/checksum.c
 */
__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
			  __u32 len, __u8 proto, __wsum sum)
{
	unsigned long long s = (__force u32)sum;

	s += (__force u32)saddr;
	s += (__force u32)daddr;
#ifdef __BIG_ENDIAN__
	s += proto + len;
#else
	s += (proto + len) << 8;
#endif
	return (__force __wsum)from64to32(s);
}

/*
 * This function has been taken from
 * Linux kernel include/asm-generic/checksum.h
 */
static inline __sum16
csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
		  __u8 proto, __wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}

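/* Checksum the UDP header and payload, then fold in the IPv4 pseudo-header
 * via csum_tcpudp_magic(). Note the loop below walks the packet in 16-bit
 * words, so an odd len would pull in one byte past the payload; the packet
 * sizes this sample generates by default keep len even.
 */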
static inline u16 udp_csum(u32 saddr, u32 daddr, u32 len,
			   u8 proto, u16 *udp_pkt)
{
	u32 csum = 0;
	u32 cnt = 0;

	/* udp hdr and data */
	for (; cnt < len; cnt += 2)
		csum += udp_pkt[cnt >> 1];

	return csum_tcpudp_magic(saddr, daddr, len, proto, csum);
}

#define ETH_FCS_SIZE 4

#define PKT_HDR_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) + \
		      sizeof(struct udphdr))

#define PKT_SIZE		(opt_pkt_size - ETH_FCS_SIZE)
#define IP_PKT_SIZE		(PKT_SIZE - sizeof(struct ethhdr))
#define UDP_PKT_SIZE		(IP_PKT_SIZE - sizeof(struct iphdr))
#define UDP_PKT_DATA_SIZE	(UDP_PKT_SIZE - sizeof(struct udphdr))

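/* Worked example with the default --tx-pkt-size of 64 bytes: PKT_SIZE is
 * 64 - 4 (FCS) = 60 bytes actually written into the frame, IP_PKT_SIZE is
 * 60 - 14 (Ethernet header) = 46, UDP_PKT_SIZE is 46 - 20 (IPv4 header) =
 * 26, and UDP_PKT_DATA_SIZE is 26 - 8 (UDP header) = 18 bytes of payload
 * filled with opt_pkt_fill_pattern.
 */
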
static u8 pkt_data[XSK_UMEM__DEFAULT_FRAME_SIZE];

static void gen_eth_hdr_data(void)
{
	struct udphdr *udp_hdr = (struct udphdr *)(pkt_data +
						   sizeof(struct ethhdr) +
						   sizeof(struct iphdr));
	struct iphdr *ip_hdr = (struct iphdr *)(pkt_data +
						sizeof(struct ethhdr));
	struct ethhdr *eth_hdr = (struct ethhdr *)pkt_data;

	/* ethernet header */
	memcpy(eth_hdr->h_dest, "\x3c\xfd\xfe\x9e\x7f\x71", ETH_ALEN);
	memcpy(eth_hdr->h_source, "\xec\xb1\xd7\x98\x3a\xc0", ETH_ALEN);
	eth_hdr->h_proto = htons(ETH_P_IP);

	/* IP header */
	ip_hdr->version = IPVERSION;
	ip_hdr->ihl = 0x5; /* 20 byte header */
	ip_hdr->tos = 0x0;
	ip_hdr->tot_len = htons(IP_PKT_SIZE);
	ip_hdr->id = 0;
	ip_hdr->frag_off = 0;
	ip_hdr->ttl = IPDEFTTL;
	ip_hdr->protocol = IPPROTO_UDP;
	ip_hdr->saddr = htonl(0x0a0a0a10);
	ip_hdr->daddr = htonl(0x0a0a0a20);

	/* IP header checksum */
	ip_hdr->check = 0;
	ip_hdr->check = ip_fast_csum((const void *)ip_hdr, ip_hdr->ihl);

	/* UDP header */
	udp_hdr->source = htons(0x1000);
	udp_hdr->dest = htons(0x1000);
	udp_hdr->len = htons(UDP_PKT_SIZE);

	/* UDP data */
	memset32_htonl(pkt_data + PKT_HDR_SIZE, opt_pkt_fill_pattern,
		       UDP_PKT_DATA_SIZE);

	/* UDP header checksum */
	udp_hdr->check = 0;
	udp_hdr->check = udp_csum(ip_hdr->saddr, ip_hdr->daddr, UDP_PKT_SIZE,
				  IPPROTO_UDP, (u16 *)udp_hdr);
}

static void gen_eth_frame(struct xsk_umem_info *umem, u64 addr)
{
	memcpy(xsk_umem__get_data(umem->buffer, addr), pkt_data,
	       PKT_SIZE);
}

static struct xsk_umem_info *xsk_configure_umem(void *buffer, u64 size)
{
	struct xsk_umem_info *umem;
	struct xsk_umem_config cfg = {
		/* We recommend that you set the fill ring size >= HW RX ring size +
		 * AF_XDP RX ring size. Make sure you fill up the fill ring
		 * with buffers at regular intervals, and you will with this setting
		 * avoid allocation failures in the driver. These are usually quite
		 * expensive since drivers have not been written to assume that
		 * allocation failures are common. For regular sockets, kernel
		 * allocated memory is used that only runs out in OOM situations
		 * that should be rare.
		 */
		.fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS * 2,
		.comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.frame_size = opt_xsk_frame_size,
		.frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM,
		.flags = opt_umem_flags
	};
	int ret;

	umem = calloc(1, sizeof(*umem));
	if (!umem)
		exit_with_error(errno);

	ret = xsk_umem__create(&umem->umem, buffer, size, &umem->fq, &umem->cq,
			       &cfg);
	if (ret)
		exit_with_error(-ret);

	umem->buffer = buffer;
	return umem;
}

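/* Stock the fill ring with frame addresses the kernel can use for RX.
 * Addresses are byte offsets into the UMEM, so frame i lives at
 * i * opt_xsk_frame_size. Reserving and submitting twice the default
 * descriptor count matches the enlarged fill_size chosen above.
 */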
static void xsk_populate_fill_ring(struct xsk_umem_info *umem)
{
	int ret, i;
	u32 idx;

	ret = xsk_ring_prod__reserve(&umem->fq,
				     XSK_RING_PROD__DEFAULT_NUM_DESCS * 2, &idx);
	if (ret != XSK_RING_PROD__DEFAULT_NUM_DESCS * 2)
		exit_with_error(-ret);
	for (i = 0; i < XSK_RING_PROD__DEFAULT_NUM_DESCS * 2; i++)
		*xsk_ring_prod__fill_addr(&umem->fq, idx++) =
			i * opt_xsk_frame_size;
	xsk_ring_prod__submit(&umem->fq, XSK_RING_PROD__DEFAULT_NUM_DESCS * 2);
}

static struct xsk_socket_info *xsk_configure_socket(struct xsk_umem_info *umem,
						    bool rx, bool tx)
{
	struct xsk_socket_config cfg;
	struct xsk_socket_info *xsk;
	struct xsk_ring_cons *rxr;
	struct xsk_ring_prod *txr;
	int ret;

	xsk = calloc(1, sizeof(*xsk));
	if (!xsk)
		exit_with_error(errno);

	xsk->umem = umem;
	cfg.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
	cfg.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
	if (opt_num_xsks > 1)
		cfg.libbpf_flags = XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD;
	else
		cfg.libbpf_flags = 0;
	cfg.xdp_flags = opt_xdp_flags;
	cfg.bind_flags = opt_xdp_bind_flags;

	rxr = rx ? &xsk->rx : NULL;
	txr = tx ? &xsk->tx : NULL;
	ret = xsk_socket__create(&xsk->xsk, opt_if, opt_queue, umem->umem,
				 rxr, txr, &cfg);
	if (ret)
		exit_with_error(-ret);

	ret = bpf_get_link_xdp_id(opt_ifindex, &prog_id, opt_xdp_flags);
	if (ret)
		exit_with_error(-ret);

	return xsk;
}

static struct option long_options[] = {
	{"rxdrop", no_argument, 0, 'r'},
	{"txonly", no_argument, 0, 't'},
	{"l2fwd", no_argument, 0, 'l'},
	{"interface", required_argument, 0, 'i'},
	{"queue", required_argument, 0, 'q'},
	{"poll", no_argument, 0, 'p'},
	{"xdp-skb", no_argument, 0, 'S'},
	{"xdp-native", no_argument, 0, 'N'},
	{"interval", required_argument, 0, 'n'},
	{"zero-copy", no_argument, 0, 'z'},
	{"copy", no_argument, 0, 'c'},
	{"frame-size", required_argument, 0, 'f'},
	{"no-need-wakeup", no_argument, 0, 'm'},
	{"unaligned", no_argument, 0, 'u'},
	{"shared-umem", no_argument, 0, 'M'},
	{"force", no_argument, 0, 'F'},
	{"duration", required_argument, 0, 'd'},
	{"batch-size", required_argument, 0, 'b'},
	{"tx-pkt-count", required_argument, 0, 'C'},
	{"tx-pkt-size", required_argument, 0, 's'},
	{"tx-pkt-pattern", required_argument, 0, 'P'},
	{"extra-stats", no_argument, 0, 'x'},
	{"quiet", no_argument, 0, 'Q'},
	{0, 0, 0, 0}
};

static void usage(const char *prog)
{
	const char *str =
		"  Usage: %s [OPTIONS]\n"
		"  Options:\n"
		"  -r, --rxdrop		Discard all incoming packets (default)\n"
		"  -t, --txonly		Only send packets\n"
		"  -l, --l2fwd		MAC swap L2 forwarding\n"
		"  -i, --interface=n	Run on interface n\n"
		"  -q, --queue=n	Use queue n (default 0)\n"
		"  -p, --poll		Use poll syscall\n"
		"  -S, --xdp-skb=n	Use XDP skb-mod\n"
		"  -N, --xdp-native=n	Enforce XDP native mode\n"
		"  -n, --interval=n	Specify statistics update interval (default 1 sec).\n"
		"  -z, --zero-copy	Force zero-copy mode.\n"
		"  -c, --copy		Force copy mode.\n"
		"  -m, --no-need-wakeup	Turn off use of driver need wakeup flag.\n"
		"  -f, --frame-size=n	Set the frame size (must be a power of two in aligned mode, default is %d).\n"
		"  -u, --unaligned	Enable unaligned chunk placement\n"
		"  -M, --shared-umem	Enable XDP_SHARED_UMEM\n"
		"  -F, --force		Force loading the XDP prog\n"
		"  -d, --duration=n	Duration in secs to run command.\n"
		"			Default: forever.\n"
		"  -b, --batch-size=n	Batch size for sending or receiving\n"
		"			packets. Default: %d\n"
		"  -C, --tx-pkt-count=n	Number of packets to send.\n"
		"			Default: Continuous packets.\n"
		"  -s, --tx-pkt-size=n	Transmit packet size.\n"
		"			(Default: %d bytes)\n"
		"			Min size: %d, Max size %d.\n"
		"  -P, --tx-pkt-pattern=n	Packet fill pattern. Default: 0x%x\n"
		"  -x, --extra-stats	Display extra statistics.\n"
		"  -Q, --quiet		Do not display any stats.\n"
		"\n";
	fprintf(stderr, str, prog, XSK_UMEM__DEFAULT_FRAME_SIZE,
		opt_batch_size, MIN_PKT_SIZE, MIN_PKT_SIZE,
		XSK_UMEM__DEFAULT_FRAME_SIZE, opt_pkt_fill_pattern);

	exit(EXIT_FAILURE);
}

static void parse_command_line(int argc, char **argv)
{
	int option_index, c;

	opterr = 0;

	for (;;) {
		c = getopt_long(argc, argv, "Frtli:q:pSNn:czf:muMd:b:C:s:P:xQ",
				long_options, &option_index);
		if (c == -1)
			break;

		switch (c) {
		case 'r':
			opt_bench = BENCH_RXDROP;
			break;
		case 't':
			opt_bench = BENCH_TXONLY;
			break;
		case 'l':
			opt_bench = BENCH_L2FWD;
			break;
		case 'i':
			opt_if = optarg;
			break;
		case 'q':
			opt_queue = atoi(optarg);
			break;
		case 'p':
			opt_poll = 1;
			break;
		case 'S':
			opt_xdp_flags |= XDP_FLAGS_SKB_MODE;
			opt_xdp_bind_flags |= XDP_COPY;
			break;
		case 'N':
			/* default, set below */
			break;
		case 'n':
			opt_interval = atoi(optarg);
			break;
		case 'z':
			opt_xdp_bind_flags |= XDP_ZEROCOPY;
			break;
		case 'c':
			opt_xdp_bind_flags |= XDP_COPY;
			break;
		case 'u':
			opt_umem_flags |= XDP_UMEM_UNALIGNED_CHUNK_FLAG;
			opt_unaligned_chunks = 1;
			opt_mmap_flags = MAP_HUGETLB;
			break;
		case 'F':
			opt_xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST;
			break;
		case 'f':
			opt_xsk_frame_size = atoi(optarg);
			break;
		case 'm':
			opt_need_wakeup = false;
			opt_xdp_bind_flags &= ~XDP_USE_NEED_WAKEUP;
			break;
		case 'M':
			opt_num_xsks = MAX_SOCKS;
			break;
		case 'd':
			opt_duration = atoi(optarg);
			opt_duration *= 1000000000;
			break;
		case 'b':
			opt_batch_size = atoi(optarg);
			break;
		case 'C':
			opt_pkt_count = atoi(optarg);
			break;
		case 's':
			opt_pkt_size = atoi(optarg);
			if (opt_pkt_size > (XSK_UMEM__DEFAULT_FRAME_SIZE) ||
			    opt_pkt_size < MIN_PKT_SIZE) {
				fprintf(stderr,
					"ERROR: Invalid frame size %d\n",
					opt_pkt_size);
				usage(basename(argv[0]));
			}
			break;
		case 'P':
			opt_pkt_fill_pattern = strtol(optarg, NULL, 16);
			break;
		case 'x':
			opt_extra_stats = 1;
			break;
		case 'Q':
			opt_quiet = 1;
			break;
		default:
			usage(basename(argv[0]));
		}
	}

	if (!(opt_xdp_flags & XDP_FLAGS_SKB_MODE))
		opt_xdp_flags |= XDP_FLAGS_DRV_MODE;

	opt_ifindex = if_nametoindex(opt_if);
	if (!opt_ifindex) {
		fprintf(stderr, "ERROR: interface \"%s\" does not exist\n",
			opt_if);
		usage(basename(argv[0]));
	}

	if ((opt_xsk_frame_size & (opt_xsk_frame_size - 1)) &&
	    !opt_unaligned_chunks) {
		fprintf(stderr, "--frame-size=%d is not a power of two\n",
			opt_xsk_frame_size);
		usage(basename(argv[0]));
	}
}

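/* Kick the kernel to start TX processing: with AF_XDP, a zero-length
 * sendto() is the wakeup mechanism. The errno values tolerated below are
 * transient conditions (no buffers, ring busy, link down) that the fast
 * path simply retries on the next call rather than treating as fatal.
 */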
static void kick_tx(struct xsk_socket_info *xsk)
{
	int ret;

	ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
	if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN ||
	    errno == EBUSY || errno == ENETDOWN)
		return;
	exit_with_error(errno);
}

static inline void complete_tx_l2fwd(struct xsk_socket_info *xsk,
				     struct pollfd *fds)
{
	struct xsk_umem_info *umem = xsk->umem;
	u32 idx_cq = 0, idx_fq = 0;
	unsigned int rcvd;
	size_t ndescs;

	if (!xsk->outstanding_tx)
		return;

	/* In copy mode, Tx is driven by a syscall so we need to use e.g. sendto() to
	 * really send the packets. In zero-copy mode we do not have to do this, since Tx
	 * is driven by the NAPI loop. So as an optimization, we do not have to call
	 * sendto() all the time in zero-copy mode for l2fwd.
	 */
	if (opt_xdp_bind_flags & XDP_COPY)
		kick_tx(xsk);

	ndescs = (xsk->outstanding_tx > opt_batch_size) ? opt_batch_size :
		xsk->outstanding_tx;

	/* re-add completed Tx buffers */
	rcvd = xsk_ring_cons__peek(&umem->cq, ndescs, &idx_cq);
	if (rcvd > 0) {
		unsigned int i;
		int ret;

		ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
		while (ret != rcvd) {
			if (ret < 0)
				exit_with_error(-ret);
			if (xsk_ring_prod__needs_wakeup(&umem->fq))
				ret = poll(fds, num_socks, opt_timeout);
			ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
		}

		for (i = 0; i < rcvd; i++)
			*xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) =
				*xsk_ring_cons__comp_addr(&umem->cq, idx_cq++);

		xsk_ring_prod__submit(&xsk->umem->fq, rcvd);
		xsk_ring_cons__release(&xsk->umem->cq, rcvd);
		xsk->outstanding_tx -= rcvd;
		xsk->ring_stats.tx_npkts += rcvd;
	}
}

static inline void complete_tx_only(struct xsk_socket_info *xsk,
				    int batch_size)
{
	unsigned int rcvd;
	u32 idx;

	if (!xsk->outstanding_tx)
		return;

	if (!opt_need_wakeup || xsk_ring_prod__needs_wakeup(&xsk->tx))
		kick_tx(xsk);

	rcvd = xsk_ring_cons__peek(&xsk->umem->cq, batch_size, &idx);
	if (rcvd > 0) {
		xsk_ring_cons__release(&xsk->umem->cq, rcvd);
		xsk->outstanding_tx -= rcvd;
		xsk->ring_stats.tx_npkts += rcvd;
	}
}

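/* RX-drop fast path: peek up to a batch of RX descriptors, immediately
 * return each frame to the fill ring (no copy, no processing beyond the
 * optional hex dump), then release the RX descriptors. With unaligned
 * chunks, xsk_umem__extract_addr() recovers the frame's base address from
 * the offset-carrying descriptor address.
 */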
static void rx_drop(struct xsk_socket_info *xsk, struct pollfd *fds)
{
	unsigned int rcvd, i;
	u32 idx_rx = 0, idx_fq = 0;
	int ret;

	rcvd = xsk_ring_cons__peek(&xsk->rx, opt_batch_size, &idx_rx);
	if (!rcvd) {
		if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq))
			ret = poll(fds, num_socks, opt_timeout);
		return;
	}

	ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
	while (ret != rcvd) {
		if (ret < 0)
			exit_with_error(-ret);
		if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq))
			ret = poll(fds, num_socks, opt_timeout);
		ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
	}

	for (i = 0; i < rcvd; i++) {
		u64 addr = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx)->addr;
		u32 len = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++)->len;
		u64 orig = xsk_umem__extract_addr(addr);

		addr = xsk_umem__add_offset_to_addr(addr);
		char *pkt = xsk_umem__get_data(xsk->umem->buffer, addr);

		hex_dump(pkt, len, addr);
		*xsk_ring_prod__fill_addr(&xsk->umem->fq, idx_fq++) = orig;
	}

	xsk_ring_prod__submit(&xsk->umem->fq, rcvd);
	xsk_ring_cons__release(&xsk->rx, rcvd);
	xsk->ring_stats.rx_npkts += rcvd;
}

static void rx_drop_all(void)
{
	struct pollfd fds[MAX_SOCKS] = {};
	int i, ret;

	for (i = 0; i < num_socks; i++) {
		fds[i].fd = xsk_socket__fd(xsks[i]->xsk);
		fds[i].events = POLLIN;
	}

	for (;;) {
		if (opt_poll) {
			ret = poll(fds, num_socks, opt_timeout);
			if (ret <= 0)
				continue;
		}

		for (i = 0; i < num_socks; i++)
			rx_drop(xsks[i], fds);

		if (benchmark_done)
			break;
	}
}

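/* TX-only path: reserve batch_size TX descriptors (reaping completions
 * until enough are free), point them at pre-generated frames, and submit.
 * frame_nb cycles through the NUM_FRAMES frames that were filled with the
 * template packet at startup, so no payload is written per send.
 */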
static void tx_only(struct xsk_socket_info *xsk, u32 *frame_nb, int batch_size)
{
	u32 idx;
	unsigned int i;

	while (xsk_ring_prod__reserve(&xsk->tx, batch_size, &idx) <
	       batch_size) {
		complete_tx_only(xsk, batch_size);
	}

	for (i = 0; i < batch_size; i++) {
		struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx,
								  idx + i);
		tx_desc->addr = (*frame_nb + i) << XSK_UMEM__DEFAULT_FRAME_SHIFT;
		tx_desc->len = PKT_SIZE;
	}

	xsk_ring_prod__submit(&xsk->tx, batch_size);
	xsk->outstanding_tx += batch_size;
	*frame_nb += batch_size;
	*frame_nb %= NUM_FRAMES;
	complete_tx_only(xsk, batch_size);
}

static inline int get_batch_size(int pkt_cnt)
{
	if (!opt_pkt_count)
		return opt_batch_size;

	if (pkt_cnt + opt_batch_size <= opt_pkt_count)
		return opt_batch_size;

	return opt_pkt_count - pkt_cnt;
}

static void complete_tx_only_all(void)
{
	bool pending;
	int i;

	do {
		pending = false;
		for (i = 0; i < num_socks; i++) {
			if (xsks[i]->outstanding_tx) {
				complete_tx_only(xsks[i], opt_batch_size);
				pending = !!xsks[i]->outstanding_tx;
			}
		}
	} while (pending);
}

static void tx_only_all(void)
{
	struct pollfd fds[MAX_SOCKS] = {};
	u32 frame_nb[MAX_SOCKS] = {};
	int pkt_cnt = 0;
	int i, ret;

	for (i = 0; i < num_socks; i++) {
		fds[i].fd = xsk_socket__fd(xsks[i]->xsk);
		fds[i].events = POLLOUT;
	}

	while ((opt_pkt_count && pkt_cnt < opt_pkt_count) || !opt_pkt_count) {
		int batch_size = get_batch_size(pkt_cnt);

		if (opt_poll) {
			ret = poll(fds, num_socks, opt_timeout);
			if (ret <= 0)
				continue;

			if (!(fds[0].revents & POLLOUT))
				continue;
		}

		for (i = 0; i < num_socks; i++)
			tx_only(xsks[i], &frame_nb[i], batch_size);

		pkt_cnt += batch_size;

		if (benchmark_done)
			break;
	}

	if (opt_pkt_count)
		complete_tx_only_all();
}

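/* L2 forwarding: every received frame has its Ethernet source and
 * destination MACs swapped and is then queued on the TX ring using the
 * same UMEM address, so the packet is retransmitted without being copied.
 * Completed TX frames are recycled back to the fill ring in
 * complete_tx_l2fwd().
 */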
static void l2fwd(struct xsk_socket_info *xsk, struct pollfd *fds)
{
	unsigned int rcvd, i;
	u32 idx_rx = 0, idx_tx = 0;
	int ret;

	complete_tx_l2fwd(xsk, fds);

	rcvd = xsk_ring_cons__peek(&xsk->rx, opt_batch_size, &idx_rx);
	if (!rcvd) {
		if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq))
			ret = poll(fds, num_socks, opt_timeout);
		return;
	}

	ret = xsk_ring_prod__reserve(&xsk->tx, rcvd, &idx_tx);
	while (ret != rcvd) {
		if (ret < 0)
			exit_with_error(-ret);
		complete_tx_l2fwd(xsk, fds);
		if (xsk_ring_prod__needs_wakeup(&xsk->tx))
			kick_tx(xsk);
		ret = xsk_ring_prod__reserve(&xsk->tx, rcvd, &idx_tx);
	}

	for (i = 0; i < rcvd; i++) {
		u64 addr = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx)->addr;
		u32 len = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++)->len;
		u64 orig = addr;

		addr = xsk_umem__add_offset_to_addr(addr);
		char *pkt = xsk_umem__get_data(xsk->umem->buffer, addr);

		swap_mac_addresses(pkt);

		hex_dump(pkt, len, addr);
		xsk_ring_prod__tx_desc(&xsk->tx, idx_tx)->addr = orig;
		xsk_ring_prod__tx_desc(&xsk->tx, idx_tx++)->len = len;
	}

	xsk_ring_prod__submit(&xsk->tx, rcvd);
	xsk_ring_cons__release(&xsk->rx, rcvd);

	xsk->ring_stats.rx_npkts += rcvd;
	xsk->outstanding_tx += rcvd;
}

static void l2fwd_all(void)
{
	struct pollfd fds[MAX_SOCKS] = {};
	int i, ret;

	for (i = 0; i < num_socks; i++) {
		fds[i].fd = xsk_socket__fd(xsks[i]->xsk);
		fds[i].events = POLLOUT | POLLIN;
	}

	for (;;) {
		if (opt_poll) {
			ret = poll(fds, num_socks, opt_timeout);
			if (ret <= 0)
				continue;
		}

		for (i = 0; i < num_socks; i++)
			l2fwd(xsks[i], fds);

		if (benchmark_done)
			break;
	}
}

static void load_xdp_program(char **argv, struct bpf_object **obj)
{
	struct bpf_prog_load_attr prog_load_attr = {
		.prog_type	= BPF_PROG_TYPE_XDP,
	};
	char xdp_filename[256];
	int prog_fd;

	snprintf(xdp_filename, sizeof(xdp_filename), "%s_kern.o", argv[0]);
	prog_load_attr.file = xdp_filename;

	if (bpf_prog_load_xattr(&prog_load_attr, obj, &prog_fd))
		exit(EXIT_FAILURE);
	if (prog_fd < 0) {
		fprintf(stderr, "ERROR: no program found: %s\n",
			strerror(prog_fd));
		exit(EXIT_FAILURE);
	}

	if (bpf_set_link_xdp_fd(opt_ifindex, prog_fd, opt_xdp_flags) < 0) {
		fprintf(stderr, "ERROR: link set xdp fd failed\n");
		exit(EXIT_FAILURE);
	}
}

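/* Register every socket fd in the XSKMAP named "xsks_map" that the custom
 * XDP program (built from %s_kern.o above) uses for bpf_redirect_map().
 * This is only needed in the shared-UMEM case; with a single socket,
 * libbpf loads its default redirect program and performs this
 * registration itself.
 */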
static void enter_xsks_into_map(struct bpf_object *obj)
{
	struct bpf_map *map;
	int i, xsks_map;

	map = bpf_object__find_map_by_name(obj, "xsks_map");
	xsks_map = bpf_map__fd(map);
	if (xsks_map < 0) {
		fprintf(stderr, "ERROR: no xsks map found: %s\n",
			strerror(xsks_map));
		exit(EXIT_FAILURE);
	}

	for (i = 0; i < num_socks; i++) {
		int fd = xsk_socket__fd(xsks[i]->xsk);
		int key, ret;

		key = i;
		ret = bpf_map_update_elem(xsks_map, &key, &fd, 0);
		if (ret) {
			fprintf(stderr, "ERROR: bpf_map_update_elem %d\n", i);
			exit(EXIT_FAILURE);
		}
	}
}

int main(int argc, char **argv)
{
	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
	bool rx = false, tx = false;
	struct xsk_umem_info *umem;
	struct bpf_object *obj;
	pthread_t pt;
	int i, ret;
	void *bufs;

	parse_command_line(argc, argv);

	if (setrlimit(RLIMIT_MEMLOCK, &r)) {
		fprintf(stderr, "ERROR: setrlimit(RLIMIT_MEMLOCK) \"%s\"\n",
			strerror(errno));
		exit(EXIT_FAILURE);
	}

	if (opt_num_xsks > 1)
		load_xdp_program(argv, &obj);

	/* Reserve memory for the umem. Use hugepages if unaligned chunk mode */
	bufs = mmap(NULL, NUM_FRAMES * opt_xsk_frame_size,
		    PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS | opt_mmap_flags, -1, 0);
	if (bufs == MAP_FAILED) {
		printf("ERROR: mmap failed\n");
		exit(EXIT_FAILURE);
	}

	/* Create sockets... */
	umem = xsk_configure_umem(bufs, NUM_FRAMES * opt_xsk_frame_size);
	if (opt_bench == BENCH_RXDROP || opt_bench == BENCH_L2FWD) {
		rx = true;
		xsk_populate_fill_ring(umem);
	}
	if (opt_bench == BENCH_L2FWD || opt_bench == BENCH_TXONLY)
		tx = true;
	for (i = 0; i < opt_num_xsks; i++)
		xsks[num_socks++] = xsk_configure_socket(umem, rx, tx);

	if (opt_bench == BENCH_TXONLY) {
		gen_eth_hdr_data();

		for (i = 0; i < NUM_FRAMES; i++)
			gen_eth_frame(umem, i * opt_xsk_frame_size);
	}

	if (opt_num_xsks > 1 && opt_bench != BENCH_TXONLY)
		enter_xsks_into_map(obj);

	signal(SIGINT, int_exit);
	signal(SIGTERM, int_exit);
	signal(SIGABRT, int_exit);

	setlocale(LC_ALL, "");

	if (!opt_quiet) {
		ret = pthread_create(&pt, NULL, poller, NULL);
		if (ret)
			exit_with_error(ret);
	}

	prev_time = get_nsecs();
	start_time = prev_time;

	if (opt_bench == BENCH_RXDROP)
		rx_drop_all();
	else if (opt_bench == BENCH_TXONLY)
		tx_only_all();
	else
		l2fwd_all();

	benchmark_done = true;

	if (!opt_quiet)
		pthread_join(pt, NULL);

	xdpsock_cleanup();

	return 0;
}