// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "filter.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Preferred number of descriptors to fill at once */
#define EFX_RX_PREFERRED_BATCH 8U

/* Number of RX buffers to recycle pages for.  When creating the RX page recycle
 * ring, this number is divided by the number of buffers per page to calculate
 * the number of pages to store in the RX page recycle ring.
 */
#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS 128u

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;

/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
				      EFX_RX_USR_BUF_SIZE)

/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow, plus one packet-worth
 * to allow pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)

static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
{
	return page_address(buf->page) + buf->page_offset;
}

static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
#else
	const u8 *data = eh + efx->rx_packet_hash_offset;
	return (u32)data[0]	  |
	       (u32)data[1] << 8  |
	       (u32)data[2] << 16 |
	       (u32)data[3] << 24;
#endif
}

static inline struct efx_rx_buffer *
efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
{
	if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
		return efx_rx_buffer(rx_queue, 0);
	else
		return rx_buf + 1;
}

static inline void efx_sync_rx_buffer(struct efx_nic *efx,
				      struct efx_rx_buffer *rx_buf,
				      unsigned int len)
{
	dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
				DMA_FROM_DEVICE);
}

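/* Work out the RX buffer layout within a DMA-mapped page: the size of each
 * buffer step (rx_dma_len plus IP alignment, padded to EFX_RX_BUF_ALIGNMENT),
 * how many buffers fit per page after the shared efx_rx_page_state header,
 * and how many pages make up one refill batch of EFX_RX_PREFERRED_BATCH
 * buffers.
 */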
void efx_rx_config_page_split(struct efx_nic *efx)
{
	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align,
				      EFX_RX_BUF_ALIGNMENT);
	efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
		((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
		 efx->rx_page_buf_step);
	efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
		efx->rx_bufs_per_page;
	efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
					       efx->rx_bufs_per_page);
}

/* Check the RX page recycle ring for a page that can be reused. */
static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct page *page;
	struct efx_rx_page_state *state;
	unsigned index;

	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
	page = rx_queue->page_ring[index];
	if (page == NULL)
		return NULL;

	rx_queue->page_ring[index] = NULL;
	/* page_remove cannot exceed page_add. */
	if (rx_queue->page_remove != rx_queue->page_add)
		++rx_queue->page_remove;

	/* If page_count is 1 then we hold the only reference to this page. */
	if (page_count(page) == 1) {
		++rx_queue->page_recycle_count;
		return page;
	} else {
		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
		++rx_queue->page_recycle_failed;
	}

	return NULL;
}

/**
 * efx_init_rx_buffers - create EFX_RX_PREFERRED_BATCH page-based RX buffers
 *
 * @rx_queue: Efx RX queue
 * @atomic: if true, allocate with GFP_ATOMIC; otherwise GFP_KERNEL
 *
 * This allocates a batch of pages, maps them for DMA, and populates
 * struct efx_rx_buffers for each one.  Return a negative error code or
 * 0 on success.  If a single page can be used for multiple buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;
	struct page *page;
	unsigned int page_offset;
	struct efx_rx_page_state *state;
	dma_addr_t dma_addr;
	unsigned index, count;

	count = 0;
	do {
		page = efx_reuse_page(rx_queue);
		if (page == NULL) {
			page = alloc_pages(__GFP_COMP |
					   (atomic ? GFP_ATOMIC : GFP_KERNEL),
					   efx->rx_buffer_order);
			if (unlikely(page == NULL))
				return -ENOMEM;
			dma_addr =
				dma_map_page(&efx->pci_dev->dev, page, 0,
					     PAGE_SIZE << efx->rx_buffer_order,
					     DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
						       dma_addr))) {
				__free_pages(page, efx->rx_buffer_order);
				return -EIO;
			}
			state = page_address(page);
			state->dma_addr = dma_addr;
		} else {
			state = page_address(page);
			dma_addr = state->dma_addr;
		}

		dma_addr += sizeof(struct efx_rx_page_state);
		page_offset = sizeof(struct efx_rx_page_state);

		do {
			index = rx_queue->added_count & rx_queue->ptr_mask;
			rx_buf = efx_rx_buffer(rx_queue, index);
			rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
			rx_buf->page = page;
			rx_buf->page_offset = page_offset + efx->rx_ip_align;
			rx_buf->len = efx->rx_dma_len;
			rx_buf->flags = 0;
			++rx_queue->added_count;
			get_page(page);
			dma_addr += efx->rx_page_buf_step;
			page_offset += efx->rx_page_buf_step;
		} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);

		rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
	} while (++count < efx->rx_pages_per_batch);

	return 0;
}

/* Unmap a DMA-mapped page.  This function is only called for the final RX
 * buffer in a page.
 */
static void efx_unmap_rx_buffer(struct efx_nic *efx,
				struct efx_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;

	if (page) {
		struct efx_rx_page_state *state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev,
			       state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
	}
}

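/* Drop the page reference held by each of num_bufs consecutive RX buffers,
 * starting at rx_buf and wrapping around the descriptor ring as needed.
 */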
static void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
				struct efx_rx_buffer *rx_buf,
				unsigned int num_bufs)
{
	do {
		if (rx_buf->page) {
			put_page(rx_buf->page);
			rx_buf->page = NULL;
		}
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--num_bufs);
}

/* Attempt to recycle the page if there is an RX recycle ring; the page can
 * only be added if this is the final RX buffer, to prevent pages being used in
 * the descriptor ring and appearing in the recycle ring simultaneously.
 */
static void efx_recycle_rx_page(struct efx_channel *channel,
				struct efx_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	struct efx_nic *efx = rx_queue->efx;
	unsigned index;

	/* Only recycle the page after processing the final buffer. */
	if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
		return;

	index = rx_queue->page_add & rx_queue->page_ptr_mask;
	if (rx_queue->page_ring[index] == NULL) {
		unsigned read_index = rx_queue->page_remove &
			rx_queue->page_ptr_mask;

		/* The next slot in the recycle ring is available, but
		 * increment page_remove if the read pointer currently
		 * points here.
		 */
		if (read_index == index)
			++rx_queue->page_remove;
		rx_queue->page_ring[index] = page;
		++rx_queue->page_add;
		return;
	}
	++rx_queue->page_recycle_full;
	efx_unmap_rx_buffer(efx, rx_buf);
	put_page(rx_buf->page);
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf)
{
	/* Release the page reference we hold for the buffer. */
	if (rx_buf->page)
		put_page(rx_buf->page);

	/* If this is the last buffer in a page, unmap and free it. */
	if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
		efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
		efx_free_rx_buffers(rx_queue, rx_buf, 1);
	}
	rx_buf->page = NULL;
}

/* Recycle the pages that are used by buffers that have just been received. */
static void efx_recycle_rx_pages(struct efx_channel *channel,
				 struct efx_rx_buffer *rx_buf,
				 unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	do {
		efx_recycle_rx_page(channel, rx_buf);
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}

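/* Discard a packet: try to put its pages back on the recycle ring, then
 * drop the buffer references so the descriptors can be refilled.
 */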
static void efx_discard_rx_packet(struct efx_channel *channel,
				  struct efx_rx_buffer *rx_buf,
				  unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	efx_recycle_rx_pages(channel, rx_buf, n_frags);

	efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
}

/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue: RX descriptor queue
 * @atomic: if true, new buffers are allocated with GFP_ATOMIC rather
 *	than GFP_KERNEL
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->max_fill.  If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here).  In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int fill_level, batch_size;
	int space, rc = 0;

	if (!rx_queue->refill_enabled)
		return;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	space = rx_queue->max_fill - fill_level;
	EFX_WARN_ON_ONCE_PARANOID(space < batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d\n",
		   efx_rx_queue_index(rx_queue), fill_level,
		   rx_queue->max_fill);

	do {
		rc = efx_init_rx_buffers(rx_queue, atomic);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= batch_size) >= batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", efx_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

 out:
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}

void efx_rx_slow_fill(struct timer_list *t)
{
	struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);

	/* Post an event to cause NAPI to run and refill the queue */
	efx_nic_generate_fill_event(rx_queue);
	++rx_queue->slow_fill_count;
}

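/* Check that a completed packet fits within the buffer that received it
 * (allowing for the NIC type's RX buffer padding); if not, mark it for
 * discard and count it as an overlength event.
 */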
static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
				     struct efx_rx_buffer *rx_buf,
				     int len)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;

	/* The packet must be discarded, but this is only a fatal error
	 * if the caller indicated it was
	 */
	rx_buf->flags |= EFX_RX_PKT_DISCARD;

	if (net_ratelimit())
		netif_err(efx, rx_err, efx->net_dev,
			  "RX queue %d overlength RX event (%#x > %#x)\n",
			  efx_rx_queue_index(rx_queue), len, max_len);

	efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}

/* Pass a received packet up through GRO.  GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
static void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
		  unsigned int n_frags, u8 *eh)
{
	struct napi_struct *napi = &channel->napi_str;
	gro_result_t gro_result;
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	skb = napi_get_frags(napi);
	if (unlikely(!skb)) {
		struct efx_rx_queue *rx_queue;

		rx_queue = efx_channel_get_rx_queue(channel);
		efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
		return;
	}

	if (efx->net_dev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
			     PKT_HASH_TYPE_L3);
	skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
			  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
	skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);

	for (;;) {
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   rx_buf->page, rx_buf->page_offset,
				   rx_buf->len);
		rx_buf->page = NULL;
		skb->len += rx_buf->len;
		if (skb_shinfo(skb)->nr_frags == n_frags)
			break;

		rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
	}

	skb->data_len = skb->len;
	skb->truesize += n_frags * efx->rx_buffer_truesize;

	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	gro_result = napi_gro_frags(napi);
	if (gro_result != GRO_DROP)
		channel->irq_mod_score += 2;
}

/* Allocate and construct an SKB around page fragments */
static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
				     struct efx_rx_buffer *rx_buf,
				     unsigned int n_frags,
				     u8 *eh, int hdr_len)
{
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	/* Allocate an SKB to store the headers */
	skb = netdev_alloc_skb(efx->net_dev,
			       efx->rx_ip_align + efx->rx_prefix_size +
			       hdr_len);
	if (unlikely(skb == NULL)) {
		atomic_inc(&efx->n_rx_noskb_drops);
		return NULL;
	}

	EFX_WARN_ON_ONCE_PARANOID(rx_buf->len < hdr_len);

	memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
	       efx->rx_prefix_size + hdr_len);
	skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
	__skb_put(skb, hdr_len);

	/* Append the remaining page(s) onto the frag list */
	if (rx_buf->len > hdr_len) {
		rx_buf->page_offset += hdr_len;
		rx_buf->len -= hdr_len;

		for (;;) {
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buf->page, rx_buf->page_offset,
					   rx_buf->len);
			rx_buf->page = NULL;
			skb->len += rx_buf->len;
			skb->data_len += rx_buf->len;
			if (skb_shinfo(skb)->nr_frags == n_frags)
				break;

			rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
		}
	} else {
		__free_pages(rx_buf->page, efx->rx_buffer_order);
		rx_buf->page = NULL;
		n_frags = 0;
	}

	skb->truesize += n_frags * efx->rx_buffer_truesize;

	/* Move past the ethernet header */
	skb->protocol = eth_type_trans(skb, efx->net_dev);

	skb_mark_napi_id(skb, &channel->napi_str);

	return skb;
}

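/* Handle a received packet.  First half: runs in the event-handling path,
 * validates the fragment count and length, syncs the DMA mappings and
 * recycles the pages, then leaves the packet on the channel for
 * __efx_rx_packet() to deliver once headers have had time to prefetch.
 */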
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int n_frags, unsigned int len, u16 flags)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_rx_buffer *rx_buf;

	rx_queue->rx_packets++;

	rx_buf = efx_rx_buffer(rx_queue, index);
	rx_buf->flags |= flags;

	/* Validate the number of fragments and completed length */
	if (n_frags == 1) {
		if (!(flags & EFX_RX_PKT_PREFIX_LEN))
			efx_rx_packet__check_len(rx_queue, rx_buf, len);
	} else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
		   unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
		   unlikely(len > n_frags * efx->rx_dma_len) ||
		   unlikely(!efx->rx_scatter)) {
		/* If this isn't an explicit discard request, either
		 * the hardware or the driver is broken.
		 */
		WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
		rx_buf->flags |= EFX_RX_PKT_DISCARD;
	}

	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received ids %x-%x len %d %s%s\n",
		   efx_rx_queue_index(rx_queue), index,
		   (index + n_frags - 1) & rx_queue->ptr_mask, len,
		   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
		   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");

	/* Discard packet, if instructed to do so.  Process the
	 * previous receive first.
	 */
	if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
		efx_rx_flush_packet(channel);
		efx_discard_rx_packet(channel, rx_buf, n_frags);
		return;
	}

	if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN))
		rx_buf->len = len;

	/* Release and/or sync the DMA mapping - assumes all RX buffers
	 * consumed in-order per RX queue.
	 */
	efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(efx_rx_buf_va(rx_buf));

	rx_buf->page_offset += efx->rx_prefix_size;
	rx_buf->len -= efx->rx_prefix_size;

	if (n_frags > 1) {
		/* Release/sync DMA mapping for additional fragments.
		 * Fix length for last fragment.
		 */
		unsigned int tail_frags = n_frags - 1;

		for (;;) {
			rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
			if (--tail_frags == 0)
				break;
			efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
		}
		rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
		efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
	}

	/* All fragments have been DMA-synced, so recycle pages. */
	rx_buf = efx_rx_buffer(rx_queue, index);
	efx_recycle_rx_pages(channel, rx_buf, n_frags);

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
	efx_rx_flush_packet(channel);
	channel->rx_pkt_n_frags = n_frags;
	channel->rx_pkt_index = index;
}

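/* Deliver a packet that is not being passed up via GRO: build an skb around
 * the fragments and hand it to the channel's receive_skb handler or to the
 * network stack.
 */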
static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
			   struct efx_rx_buffer *rx_buf,
			   unsigned int n_frags)
{
	struct sk_buff *skb;
	u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);

	skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
	if (unlikely(skb == NULL)) {
		struct efx_rx_queue *rx_queue;

		rx_queue = efx_channel_get_rx_queue(channel);
		efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
		return;
	}
	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	/* Set the SKB flags */
	skb_checksum_none_assert(skb);
	if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
	}

	efx_rx_skb_attach_timestamp(channel, skb);

	if (channel->type->receive_skb)
		if (channel->type->receive_skb(channel, skb))
			return;

	/* Pass the packet up */
	if (channel->rx_list != NULL)
		/* Add to list, will pass up later */
		list_add_tail(&skb->list, channel->rx_list);
	else
		/* No list, so pass it up now */
		netif_receive_skb(skb);
}

/* Handle a received packet.  Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_buffer *rx_buf =
		efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
	u8 *eh = efx_rx_buf_va(rx_buf);

	/* Read length from the prefix if necessary.  This already
	 * excludes the length of the prefix itself.
	 */
	if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
		rx_buf->len = le16_to_cpup((__le16 *)
					   (eh + efx->rx_packet_len_offset));

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		struct efx_rx_queue *rx_queue;

		efx_loopback_rx_packet(efx, eh, rx_buf->len);
		rx_queue = efx_channel_get_rx_queue(channel);
		efx_free_rx_buffers(rx_queue, rx_buf,
				    channel->rx_pkt_n_frags);
		goto out;
	}

	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
		rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;

	if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
		efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
	else
		efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
out:
	channel->rx_pkt_n_frags = 0;
}

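/* Allocate the software descriptor ring and the hardware resources for an
 * RX queue.  The ring size is rounded up to a power of two so that
 * ptr_mask can be used for index wrapping.
 */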
int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	rx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating RX queue %d size %#x mask %#x\n",
		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
		  rx_queue->ptr_mask);

	/* Allocate RX buffers */
	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
				   GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = efx_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}

	return rc;
}

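/* Size and allocate the page recycle ring.  The larger IOMMU-sized ring is
 * used when an IOMMU is present (and always on PPC64); otherwise a small
 * ring of two refill batches is used.
 */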
static void efx_init_rx_recycle_ring(struct efx_nic *efx,
				     struct efx_rx_queue *rx_queue)
{
	unsigned int bufs_in_recycle_ring, page_ring_size;

	/* Set the RX recycle ring size */
#ifdef CONFIG_PPC64
	bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
#else
	if (iommu_present(&pci_bus_type))
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
	else
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
#endif /* CONFIG_PPC64 */

	page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
					    efx->rx_bufs_per_page);
	rx_queue->page_ring = kcalloc(page_ring_size,
				      sizeof(*rx_queue->page_ring), GFP_KERNEL);
	rx_queue->page_ptr_mask = page_ring_size - 1;
}

void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int max_fill, trigger, max_trigger;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;
	efx_init_rx_recycle_ring(efx, rx_queue);

	rx_queue->page_remove = 0;
	rx_queue->page_add = rx_queue->page_ptr_mask + 1;
	rx_queue->page_recycle_count = 0;
	rx_queue->page_recycle_failed = 0;
	rx_queue->page_recycle_full = 0;

	/* Initialise limit fields */
	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
	max_trigger =
		max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	if (rx_refill_threshold != 0) {
		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;
	rx_queue->refill_enabled = true;

	/* Set up RX descriptor ring */
	efx_nic_init_rx(rx_queue);
}

void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	int i;
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

	del_timer_sync(&rx_queue->slow_fill);

	/* Release RX buffers from the current read ptr to the write ptr */
	if (rx_queue->buffer) {
		for (i = rx_queue->removed_count; i < rx_queue->added_count;
		     i++) {
			unsigned index = i & rx_queue->ptr_mask;
			rx_buf = efx_rx_buffer(rx_queue, index);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}

	/* Unmap and release the pages in the recycle ring.  Remove the ring. */
	for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
		struct page *page = rx_queue->page_ring[i];
		struct efx_rx_page_state *state;

		if (page == NULL)
			continue;

		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
	}
	kfree(rx_queue->page_ring);
	rx_queue->page_ring = NULL;
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

	efx_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}

module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring refill threshold (%)");

#ifdef CONFIG_RFS_ACCEL

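/* Deferred work for efx_filter_rfs(): perform the actual filter insertion,
 * record the result in the ARFS hash table (if any) and in the channel's
 * rps_flow_id table so the filter can later be considered for expiry.
 */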
static void efx_filter_rfs_work(struct work_struct *data)
{
	struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion,
							      work);
	struct efx_nic *efx = netdev_priv(req->net_dev);
	struct efx_channel *channel = efx_get_channel(efx, req->rxq_index);
	int slot_idx = req - efx->rps_slot;
	struct efx_arfs_rule *rule;
	u16 arfs_id = 0;
	int rc;

	rc = efx->type->filter_insert(efx, &req->spec, true);
	if (rc >= 0)
		rc %= efx->type->max_rx_ip_filters;
	if (efx->rps_hash_table) {
		spin_lock_bh(&efx->rps_hash_lock);
		rule = efx_rps_hash_find(efx, &req->spec);
		/* The rule might have already gone, if someone else's request
		 * for the same spec was already worked and then expired before
		 * we got around to our work.  In that case we have nothing
		 * tying us to an arfs_id, meaning that as soon as the filter
		 * is considered for expiry it will be removed.
		 */
		if (rule) {
			if (rc < 0)
				rule->filter_id = EFX_ARFS_FILTER_ID_ERROR;
			else
				rule->filter_id = rc;
			arfs_id = rule->arfs_id;
		}
		spin_unlock_bh(&efx->rps_hash_lock);
	}
	if (rc >= 0) {
		/* Remember this so we can check whether to expire the filter
		 * later.
		 */
		mutex_lock(&efx->rps_mutex);
		channel->rps_flow_id[rc] = req->flow_id;
		++channel->rfs_filters_added;
		mutex_unlock(&efx->rps_mutex);

		if (req->spec.ether_type == htons(ETH_P_IP))
			netif_info(efx, rx_status, efx->net_dev,
				   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n",
				   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				   req->spec.rem_host, ntohs(req->spec.rem_port),
				   req->spec.loc_host, ntohs(req->spec.loc_port),
				   req->rxq_index, req->flow_id, rc, arfs_id);
		else
			netif_info(efx, rx_status, efx->net_dev,
				   "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n",
				   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				   req->spec.rem_host, ntohs(req->spec.rem_port),
				   req->spec.loc_host, ntohs(req->spec.loc_port),
				   req->rxq_index, req->flow_id, rc, arfs_id);
	}

	/* Release references */
	clear_bit(slot_idx, &efx->rps_slot_map);
	dev_put(req->net_dev);
}

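/* Accelerated RFS entry point: dissect the flow, build a filter specification
 * for it and queue asynchronous insertion via efx_filter_rfs_work().  Returns
 * the arfs_id used to track the flow, or a negative error code.
 */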
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_async_filter_insertion *req;
	struct efx_arfs_rule *rule;
	struct flow_keys fk;
	int slot_idx;
	bool new;
	int rc;

	/* find a free slot */
	for (slot_idx = 0; slot_idx < EFX_RPS_MAX_IN_FLIGHT; slot_idx++)
		if (!test_and_set_bit(slot_idx, &efx->rps_slot_map))
			break;
	if (slot_idx >= EFX_RPS_MAX_IN_FLIGHT)
		return -EBUSY;

	if (flow_id == RPS_FLOW_ID_INVALID) {
		rc = -EINVAL;
		goto out_clear;
	}

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}

	if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}
	if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}

	req = efx->rps_slot + slot_idx;
	efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT,
			   efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
			   rxq_index);
	req->spec.match_flags =
		EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
		EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
	req->spec.ether_type = fk.basic.n_proto;
	req->spec.ip_proto = fk.basic.ip_proto;

	if (fk.basic.n_proto == htons(ETH_P_IP)) {
		req->spec.rem_host[0] = fk.addrs.v4addrs.src;
		req->spec.loc_host[0] = fk.addrs.v4addrs.dst;
	} else {
		memcpy(req->spec.rem_host, &fk.addrs.v6addrs.src,
		       sizeof(struct in6_addr));
		memcpy(req->spec.loc_host, &fk.addrs.v6addrs.dst,
		       sizeof(struct in6_addr));
	}

	req->spec.rem_port = fk.ports.src;
	req->spec.loc_port = fk.ports.dst;

	if (efx->rps_hash_table) {
		/* Add it to ARFS hash table */
		spin_lock(&efx->rps_hash_lock);
		rule = efx_rps_hash_add(efx, &req->spec, &new);
		if (!rule) {
			rc = -ENOMEM;
			goto out_unlock;
		}
		if (new)
			rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER;
		rc = rule->arfs_id;
		/* Skip if existing or pending filter already does the right thing */
		if (!new && rule->rxq_index == rxq_index &&
		    rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING)
			goto out_unlock;
		rule->rxq_index = rxq_index;
		rule->filter_id = EFX_ARFS_FILTER_ID_PENDING;
		spin_unlock(&efx->rps_hash_lock);
	} else {
		/* Without an ARFS hash table, we just use arfs_id 0 for all
		 * filters.  This means if multiple flows hash to the same
		 * flow_id, all but the most recently touched will be eligible
		 * for expiry.
		 */
		rc = 0;
	}

	/* Queue the request */
	dev_hold(req->net_dev = net_dev);
	INIT_WORK(&req->work, efx_filter_rfs_work);
	req->rxq_index = rxq_index;
	req->flow_id = flow_id;
	schedule_work(&req->work);
	return rc;
out_unlock:
	spin_unlock(&efx->rps_hash_lock);
out_clear:
	clear_bit(slot_idx, &efx->rps_slot_map);
	return rc;
}

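/* Scan up to @quota installed ARFS filters, starting where the previous scan
 * left off, and ask the NIC type to expire any whose flows have gone idle.
 * Returns false without doing anything if the RPS mutex is contended.
 */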
bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
{
	bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
	unsigned int channel_idx, index, size;
	u32 flow_id;

	if (!mutex_trylock(&efx->rps_mutex))
		return false;
	expire_one = efx->type->filter_rfs_expire_one;
	channel_idx = efx->rps_expire_channel;
	index = efx->rps_expire_index;
	size = efx->type->max_rx_ip_filters;
	while (quota--) {
		struct efx_channel *channel = efx_get_channel(efx, channel_idx);
		flow_id = channel->rps_flow_id[index];

		if (flow_id != RPS_FLOW_ID_INVALID &&
		    expire_one(efx, flow_id, index)) {
			netif_info(efx, rx_status, efx->net_dev,
				   "expired filter %d [queue %u flow %u]\n",
				   index, channel_idx, flow_id);
			channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
		}
		if (++index == size) {
			if (++channel_idx == efx->n_channels)
				channel_idx = 0;
			index = 0;
		}
	}
	efx->rps_expire_channel = channel_idx;
	efx->rps_expire_index = index;

	mutex_unlock(&efx->rps_mutex);
	return true;
}

#endif /* CONFIG_RFS_ACCEL */

/**
 * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
 * @spec: Specification to test
 *
 * Return: %true if the specification is a non-drop RX filter that
 * matches a local MAC address I/G bit value of 1 or matches a local
 * IPv4 or IPv6 address value in the respective multicast address
 * range.  Otherwise %false.
 */
bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
{
	if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
	    spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
		return false;

	if (spec->match_flags &
	    (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
	    is_multicast_ether_addr(spec->loc_mac))
		return true;

	if ((spec->match_flags &
	     (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
	    (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
		if (spec->ether_type == htons(ETH_P_IP) &&
		    ipv4_is_multicast(spec->loc_host[0]))
			return true;
		if (spec->ether_type == htons(ETH_P_IPV6) &&
		    ((const u8 *)spec->loc_host)[0] == 0xff)
			return true;
	}

	return false;
}