// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "filter.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Preferred number of descriptors to fill at once */
#define EFX_RX_PREFERRED_BATCH 8U

/* Number of RX buffers to recycle pages for.  When creating the RX page
 * recycle ring, this number is divided by the number of buffers per page
 * to calculate the number of pages to store in the RX page recycle ring.
 * Recycling avoids the cost of re-mapping pages for DMA, which is
 * especially high when an IOMMU is in use, hence the deeper ring in
 * that case.
 */
#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS 128u

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;

/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
				      EFX_RX_USR_BUF_SIZE)

/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow, plus one packet-worth
 * to allow pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)

static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
{
	return page_address(buf->page) + buf->page_offset;
}

static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
#else
	const u8 *data = eh + efx->rx_packet_hash_offset;
	return (u32)data[0]	  |
	       (u32)data[1] << 8  |
	       (u32)data[2] << 16 |
	       (u32)data[3] << 24;
#endif
}

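/* Return the RX buffer that follows @rx_buf in the descriptor ring,
 * wrapping back to entry 0 after the last entry.
 */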
static inline struct efx_rx_buffer *
efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
{
	if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
		return efx_rx_buffer(rx_queue, 0);
	else
		return rx_buf + 1;
}

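/* Make the first @len bytes of a DMA-mapped RX buffer visible to the
 * CPU before the driver looks at the packet contents.
 */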
static inline void efx_sync_rx_buffer(struct efx_nic *efx,
				      struct efx_rx_buffer *rx_buf,
				      unsigned int len)
{
	dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
				DMA_FROM_DEVICE);
}

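/* Work out how RX buffers are laid out within each page, allowing for
 * the struct efx_rx_page_state kept at the start of each page and the
 * IP header alignment offset.  With 4K pages, order-0 allocation and a
 * standard MTU this typically yields two buffers per page.
 */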
void efx_rx_config_page_split(struct efx_nic *efx)
{
	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align,
				      EFX_RX_BUF_ALIGNMENT);
	efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
		((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
		 efx->rx_page_buf_step);
	efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
		efx->rx_bufs_per_page;
	efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
					       efx->rx_bufs_per_page);
}

/* Check the RX page recycle ring for a page that can be reused. */
static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct page *page;
	struct efx_rx_page_state *state;
	unsigned index;

	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
	page = rx_queue->page_ring[index];
	if (page == NULL)
		return NULL;

	rx_queue->page_ring[index] = NULL;
	/* page_remove cannot exceed page_add. */
	if (rx_queue->page_remove != rx_queue->page_add)
		++rx_queue->page_remove;

	/* If page_count is 1 then we hold the only reference to this page. */
	if (page_count(page) == 1) {
		++rx_queue->page_recycle_count;
		return page;
	} else {
		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
		++rx_queue->page_recycle_failed;
	}

	return NULL;
}

/**
 * efx_init_rx_buffers - create EFX_RX_PREFERRED_BATCH page-based RX buffers
 *
 * @rx_queue: Efx RX queue
 * @atomic: control memory allocation flags
 *
 * This allocates a batch of pages, maps them for DMA, and populates
 * struct efx_rx_buffers for each one. Return a negative error code or
 * 0 on success. If a single page can be used for multiple buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;
	struct page *page;
	unsigned int page_offset;
	struct efx_rx_page_state *state;
	dma_addr_t dma_addr;
	unsigned index, count;

	count = 0;
	do {
		page = efx_reuse_page(rx_queue);
		if (page == NULL) {
			page = alloc_pages(__GFP_COMP |
					   (atomic ? GFP_ATOMIC : GFP_KERNEL),
					   efx->rx_buffer_order);
			if (unlikely(page == NULL))
				return -ENOMEM;
			dma_addr =
				dma_map_page(&efx->pci_dev->dev, page, 0,
					     PAGE_SIZE << efx->rx_buffer_order,
					     DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
						       dma_addr))) {
				__free_pages(page, efx->rx_buffer_order);
				return -EIO;
			}
			state = page_address(page);
			state->dma_addr = dma_addr;
		} else {
			state = page_address(page);
			dma_addr = state->dma_addr;
		}

		dma_addr += sizeof(struct efx_rx_page_state);
		page_offset = sizeof(struct efx_rx_page_state);

		do {
			index = rx_queue->added_count & rx_queue->ptr_mask;
			rx_buf = efx_rx_buffer(rx_queue, index);
			rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
			rx_buf->page = page;
			rx_buf->page_offset = page_offset + efx->rx_ip_align;
			rx_buf->len = efx->rx_dma_len;
			rx_buf->flags = 0;
			++rx_queue->added_count;
			get_page(page);
			dma_addr += efx->rx_page_buf_step;
			page_offset += efx->rx_page_buf_step;
		} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);

		rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
	} while (++count < efx->rx_pages_per_batch);

	return 0;
}

/* Unmap a DMA-mapped page.  This function is only called for the final RX
 * buffer in a page.
 */
static void efx_unmap_rx_buffer(struct efx_nic *efx,
				struct efx_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;

	if (page) {
		struct efx_rx_page_state *state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev,
			       state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
	}
}

static void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
				struct efx_rx_buffer *rx_buf,
				unsigned int num_bufs)
{
	do {
		if (rx_buf->page) {
			put_page(rx_buf->page);
			rx_buf->page = NULL;
		}
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--num_bufs);
}

/* Attempt to recycle the page if there is an RX recycle ring; the page can
 * only be added if this is the final RX buffer, to prevent pages being used in
 * the descriptor ring and appearing in the recycle ring simultaneously.
 */
static void efx_recycle_rx_page(struct efx_channel *channel,
				struct efx_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	struct efx_nic *efx = rx_queue->efx;
	unsigned index;

	/* Only recycle the page after processing the final buffer. */
	if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
		return;

	index = rx_queue->page_add & rx_queue->page_ptr_mask;
	if (rx_queue->page_ring[index] == NULL) {
		unsigned read_index = rx_queue->page_remove &
			rx_queue->page_ptr_mask;

		/* The next slot in the recycle ring is available, but
		 * increment page_remove if the read pointer currently
		 * points here.
		 */
		if (read_index == index)
			++rx_queue->page_remove;
		rx_queue->page_ring[index] = page;
		++rx_queue->page_add;
		return;
	}
	++rx_queue->page_recycle_full;
	efx_unmap_rx_buffer(efx, rx_buf);
	put_page(rx_buf->page);
}

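/* Tear down one RX buffer.  Unlike the fast path, no attempt is made
 * to recycle the page; the buffer's page reference (and the DMA
 * mapping, for the last buffer in a page) is simply released.
 */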
static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf)
{
	/* Release the page reference we hold for the buffer. */
	if (rx_buf->page)
		put_page(rx_buf->page);

	/* If this is the last buffer in a page, unmap and free it. */
	if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
		efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
		efx_free_rx_buffers(rx_queue, rx_buf, 1);
	}
	rx_buf->page = NULL;
}

/* Recycle the pages that are used by buffers that have just been received. */
static void efx_recycle_rx_pages(struct efx_channel *channel,
				 struct efx_rx_buffer *rx_buf,
				 unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	do {
		efx_recycle_rx_page(channel, rx_buf);
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}

static void efx_discard_rx_packet(struct efx_channel *channel,
				  struct efx_rx_buffer *rx_buf,
				  unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	efx_recycle_rx_pages(channel, rx_buf, n_frags);

	efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
}

/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue: RX descriptor queue
 * @atomic: control memory allocation flags
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->max_fill.  If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here).  In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int fill_level, batch_size;
	int space, rc = 0;

	if (!rx_queue->refill_enabled)
		return;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	space = rx_queue->max_fill - fill_level;
	EFX_WARN_ON_ONCE_PARANOID(space < batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d\n",
		   efx_rx_queue_index(rx_queue), fill_level,
		   rx_queue->max_fill);

	do {
		rc = efx_init_rx_buffers(rx_queue, atomic);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= batch_size) >= batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", efx_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

out:
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}

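/* Timer callback scheduled when a fast fill failed for lack of memory.
 * It posts an event so that the refill is retried from the NAPI
 * handler, which provides the serialisation that
 * efx_fast_push_rx_descriptors() requires.
 */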
void efx_rx_slow_fill(struct timer_list *t)
{
	struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);

	/* Post an event to cause NAPI to run and refill the queue */
	efx_nic_generate_fill_event(rx_queue);
	++rx_queue->slow_fill_count;
}

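/* Check that the completed length of a receive fits in its buffer.
 * Overlength packets are counted and flagged for discard.
 */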
static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
				     struct efx_rx_buffer *rx_buf,
				     int len)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;

	/* The packet must be discarded, but this is only a fatal error
	 * if the caller indicated it was.
	 */
	rx_buf->flags |= EFX_RX_PKT_DISCARD;

	if (net_ratelimit())
		netif_err(efx, rx_err, efx->net_dev,
			  "RX queue %d overlength RX event (%#x > %#x)\n",
			  efx_rx_queue_index(rx_queue), len, max_len);

	efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}

| 406 | |
Ben Hutchings | 61321d9 | 2012-02-25 01:58:35 +0000 | [diff] [blame] | 407 | /* Pass a received packet up through GRO. GRO can handle pages |
| 408 | * regardless of checksum state and skbs with a good checksum. |
Ben Hutchings | 8ceee66 | 2008-04-27 12:55:59 +0100 | [diff] [blame] | 409 | */ |
Ben Hutchings | 85740cdf | 2013-01-29 23:33:15 +0000 | [diff] [blame] | 410 | static void |
| 411 | efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf, |
| 412 | unsigned int n_frags, u8 *eh) |
Ben Hutchings | 8ceee66 | 2008-04-27 12:55:59 +0100 | [diff] [blame] | 413 | { |
Herbert Xu | da3bc07 | 2009-01-18 21:50:16 -0800 | [diff] [blame] | 414 | struct napi_struct *napi = &channel->napi_str; |
Ben Hutchings | 18e1d2b | 2009-10-29 07:21:24 +0000 | [diff] [blame] | 415 | gro_result_t gro_result; |
Alexandre Rames | 97d48a1 | 2013-01-11 12:26:21 +0000 | [diff] [blame] | 416 | struct efx_nic *efx = channel->efx; |
Alexandre Rames | 97d48a1 | 2013-01-11 12:26:21 +0000 | [diff] [blame] | 417 | struct sk_buff *skb; |
Ben Hutchings | 8ceee66 | 2008-04-27 12:55:59 +0100 | [diff] [blame] | 418 | |
Alexandre Rames | 97d48a1 | 2013-01-11 12:26:21 +0000 | [diff] [blame] | 419 | skb = napi_get_frags(napi); |
Ben Hutchings | 85740cdf | 2013-01-29 23:33:15 +0000 | [diff] [blame] | 420 | if (unlikely(!skb)) { |
Daniel Pieczko | 9eb0a5d | 2015-05-29 12:25:54 +0100 | [diff] [blame] | 421 | struct efx_rx_queue *rx_queue; |
| 422 | |
| 423 | rx_queue = efx_channel_get_rx_queue(channel); |
| 424 | efx_free_rx_buffers(rx_queue, rx_buf, n_frags); |
Alexandre Rames | 97d48a1 | 2013-01-11 12:26:21 +0000 | [diff] [blame] | 425 | return; |
| 426 | } |
Ben Hutchings | 1241e95 | 2009-11-23 16:02:25 +0000 | [diff] [blame] | 427 | |
Alexandre Rames | 97d48a1 | 2013-01-11 12:26:21 +0000 | [diff] [blame] | 428 | if (efx->net_dev->features & NETIF_F_RXHASH) |
Tom Herbert | c7cb38a | 2013-12-17 23:31:50 -0800 | [diff] [blame] | 429 | skb_set_hash(skb, efx_rx_buf_hash(efx, eh), |
| 430 | PKT_HASH_TYPE_L3); |
Alexandre Rames | 97d48a1 | 2013-01-11 12:26:21 +0000 | [diff] [blame] | 431 | skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ? |
| 432 | CHECKSUM_UNNECESSARY : CHECKSUM_NONE); |
Jon Cooper | da50ae2 | 2017-02-08 16:51:02 +0000 | [diff] [blame] | 433 | skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL); |
Ben Hutchings | 8ceee66 | 2008-04-27 12:55:59 +0100 | [diff] [blame] | 434 | |
Ben Hutchings | 85740cdf | 2013-01-29 23:33:15 +0000 | [diff] [blame] | 435 | for (;;) { |
| 436 | skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, |
| 437 | rx_buf->page, rx_buf->page_offset, |
| 438 | rx_buf->len); |
| 439 | rx_buf->page = NULL; |
| 440 | skb->len += rx_buf->len; |
| 441 | if (skb_shinfo(skb)->nr_frags == n_frags) |
| 442 | break; |
| 443 | |
| 444 | rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf); |
| 445 | } |
| 446 | |
| 447 | skb->data_len = skb->len; |
| 448 | skb->truesize += n_frags * efx->rx_buffer_truesize; |
| 449 | |
Alexandre Rames | 97d48a1 | 2013-01-11 12:26:21 +0000 | [diff] [blame] | 450 | skb_record_rx_queue(skb, channel->rx_queue.core_index); |
Ben Hutchings | 3eadb7b | 2009-11-23 16:02:40 +0000 | [diff] [blame] | 451 | |
Ben Hutchings | 85740cdf | 2013-01-29 23:33:15 +0000 | [diff] [blame] | 452 | gro_result = napi_gro_frags(napi); |
Alexandre Rames | 97d48a1 | 2013-01-11 12:26:21 +0000 | [diff] [blame] | 453 | if (gro_result != GRO_DROP) |
Ben Hutchings | 18e1d2b | 2009-10-29 07:21:24 +0000 | [diff] [blame] | 454 | channel->irq_mod_score += 2; |
Alexandre Rames | 97d48a1 | 2013-01-11 12:26:21 +0000 | [diff] [blame] | 455 | } |
| 456 | |
/* Allocate and construct an SKB around page fragments */
static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
				     struct efx_rx_buffer *rx_buf,
				     unsigned int n_frags,
				     u8 *eh, int hdr_len)
{
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	/* Allocate an SKB to store the headers */
	skb = netdev_alloc_skb(efx->net_dev,
			       efx->rx_ip_align + efx->rx_prefix_size +
			       hdr_len);
	if (unlikely(skb == NULL)) {
		atomic_inc(&efx->n_rx_noskb_drops);
		return NULL;
	}

	EFX_WARN_ON_ONCE_PARANOID(rx_buf->len < hdr_len);

	memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
	       efx->rx_prefix_size + hdr_len);
	skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
	__skb_put(skb, hdr_len);

	/* Append the remaining page(s) onto the frag list */
	if (rx_buf->len > hdr_len) {
		rx_buf->page_offset += hdr_len;
		rx_buf->len -= hdr_len;

		for (;;) {
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buf->page, rx_buf->page_offset,
					   rx_buf->len);
			rx_buf->page = NULL;
			skb->len += rx_buf->len;
			skb->data_len += rx_buf->len;
			if (skb_shinfo(skb)->nr_frags == n_frags)
				break;

			rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
		}
	} else {
		__free_pages(rx_buf->page, efx->rx_buffer_order);
		rx_buf->page = NULL;
		n_frags = 0;
	}

	skb->truesize += n_frags * efx->rx_buffer_truesize;

	/* Move past the ethernet header */
	skb->protocol = eth_type_trans(skb, efx->net_dev);

	skb_mark_napi_id(skb, &channel->napi_str);

	return skb;
}

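/* Handle a received packet.  First half: validates the completion,
 * syncs the DMA mappings and recycles the pages, then leaves the
 * fragment details in channel->rx_pkt_* for the second half
 * (__efx_rx_packet) to deliver.  Avoids reading the packet payload;
 * it is only prefetched here.
 */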
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int n_frags, unsigned int len, u16 flags)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_rx_buffer *rx_buf;

	rx_queue->rx_packets++;

	rx_buf = efx_rx_buffer(rx_queue, index);
	rx_buf->flags |= flags;

	/* Validate the number of fragments and completed length */
	if (n_frags == 1) {
		if (!(flags & EFX_RX_PKT_PREFIX_LEN))
			efx_rx_packet__check_len(rx_queue, rx_buf, len);
	} else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
		   unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
		   unlikely(len > n_frags * efx->rx_dma_len) ||
		   unlikely(!efx->rx_scatter)) {
		/* If this isn't an explicit discard request, either
		 * the hardware or the driver is broken.
		 */
		WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
		rx_buf->flags |= EFX_RX_PKT_DISCARD;
	}

	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received ids %x-%x len %d %s%s\n",
		   efx_rx_queue_index(rx_queue), index,
		   (index + n_frags - 1) & rx_queue->ptr_mask, len,
		   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
		   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");

	/* Discard packet, if instructed to do so.  Process the
	 * previous receive first.
	 */
	if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
		efx_rx_flush_packet(channel);
		efx_discard_rx_packet(channel, rx_buf, n_frags);
		return;
	}

	if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN))
		rx_buf->len = len;

	/* Release and/or sync the DMA mapping - assumes all RX buffers
	 * consumed in-order per RX queue.
	 */
	efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(efx_rx_buf_va(rx_buf));

	rx_buf->page_offset += efx->rx_prefix_size;
	rx_buf->len -= efx->rx_prefix_size;

	if (n_frags > 1) {
		/* Release/sync DMA mapping for additional fragments.
		 * Fix length for last fragment.
		 */
		unsigned int tail_frags = n_frags - 1;

		for (;;) {
			rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
			if (--tail_frags == 0)
				break;
			efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
		}
		rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
		efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
	}

	/* All fragments have been DMA-synced, so recycle pages. */
	rx_buf = efx_rx_buffer(rx_queue, index);
	efx_recycle_rx_pages(channel, rx_buf, n_frags);

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
	efx_rx_flush_packet(channel);
	channel->rx_pkt_n_frags = n_frags;
	channel->rx_pkt_index = index;
}

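/* Deliver a packet that is not going through GRO: construct an skb
 * around the page fragments and pass it up the stack, or to the
 * channel's ->receive_skb() handler if one is set.
 */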
static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
			   struct efx_rx_buffer *rx_buf,
			   unsigned int n_frags)
{
	struct sk_buff *skb;
	u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);

	skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
	if (unlikely(skb == NULL)) {
		struct efx_rx_queue *rx_queue;

		rx_queue = efx_channel_get_rx_queue(channel);
		efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
		return;
	}
	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	/* Set the SKB flags */
	skb_checksum_none_assert(skb);
	if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
	}

	efx_rx_skb_attach_timestamp(channel, skb);

	if (channel->type->receive_skb)
		if (channel->type->receive_skb(channel, skb))
			return;

	/* Pass the packet up */
	if (channel->rx_list != NULL)
		/* Add to list, will pass up later */
		list_add_tail(&skb->list, channel->rx_list);
	else
		/* No list, so pass it up now */
		netif_receive_skb(skb);
}

/* Handle a received packet.  Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_buffer *rx_buf =
		efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
	u8 *eh = efx_rx_buf_va(rx_buf);

	/* Read length from the prefix if necessary.  This already
	 * excludes the length of the prefix itself.
	 */
	if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
		rx_buf->len = le16_to_cpup((__le16 *)
					   (eh + efx->rx_packet_len_offset));

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		struct efx_rx_queue *rx_queue;

		efx_loopback_rx_packet(efx, eh, rx_buf->len);
		rx_queue = efx_channel_get_rx_queue(channel);
		efx_free_rx_buffers(rx_queue, rx_buf,
				    channel->rx_pkt_n_frags);
		goto out;
	}

	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
		rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;

	if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
		efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
	else
		efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
out:
	channel->rx_pkt_n_frags = 0;
}

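/* Allocate the software descriptor ring and the hardware RX queue.
 * The ring size is rounded up to a power of two so that a simple
 * pointer mask can be used for indexing.
 */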
int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	rx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating RX queue %d size %#x mask %#x\n",
		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
		  rx_queue->ptr_mask);

	/* Allocate RX buffers */
	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
				   GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = efx_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}

	return rc;
}

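/* Size and allocate the page recycle ring.  The IOMMU-sized ring is
 * used unconditionally on PPC64, where the code assumes an IOMMU is
 * always in use.
 */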
static void efx_init_rx_recycle_ring(struct efx_nic *efx,
				     struct efx_rx_queue *rx_queue)
{
	unsigned int bufs_in_recycle_ring, page_ring_size;

	/* Set the RX recycle ring size */
#ifdef CONFIG_PPC64
	bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
#else
	if (iommu_present(&pci_bus_type))
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
	else
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
#endif /* CONFIG_PPC64 */

	page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
					    efx->rx_bufs_per_page);
	rx_queue->page_ring = kcalloc(page_ring_size,
				      sizeof(*rx_queue->page_ring), GFP_KERNEL);
	rx_queue->page_ptr_mask = page_ring_size - 1;
}

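/* Initialise an RX queue for use: reset the ring pointers, set up the
 * page recycle ring, and derive the fast-fill trigger level from the
 * rx_refill_threshold module parameter.
 */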
void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int max_fill, trigger, max_trigger;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;
	efx_init_rx_recycle_ring(efx, rx_queue);

	rx_queue->page_remove = 0;
	rx_queue->page_add = rx_queue->page_ptr_mask + 1;
	rx_queue->page_recycle_count = 0;
	rx_queue->page_recycle_failed = 0;
	rx_queue->page_recycle_full = 0;

	/* Initialise limit fields */
	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
	max_trigger =
		max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	if (rx_refill_threshold != 0) {
		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;
	rx_queue->refill_enabled = true;

	/* Set up RX descriptor ring */
	efx_nic_init_rx(rx_queue);
}

void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	int i;
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

	del_timer_sync(&rx_queue->slow_fill);

	/* Release RX buffers from the current read ptr to the write ptr */
	if (rx_queue->buffer) {
		for (i = rx_queue->removed_count; i < rx_queue->added_count;
		     i++) {
			unsigned index = i & rx_queue->ptr_mask;
			rx_buf = efx_rx_buffer(rx_queue, index);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}

	/* Unmap and release the pages in the recycle ring. Remove the ring. */
	for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
		struct page *page = rx_queue->page_ring[i];
		struct efx_rx_page_state *state;

		if (page == NULL)
			continue;

		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
	}
	kfree(rx_queue->page_ring);
	rx_queue->page_ring = NULL;
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

	efx_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}

Ben Hutchings | 8ceee66 | 2008-04-27 12:55:59 +0100 | [diff] [blame] | 824 | |
module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring refill threshold (%)");

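/* Example usage (illustrative; this driver normally loads as "sfc"):
 *
 *	modprobe sfc rx_refill_threshold=90
 *
 * sets the refill trigger to 90% of the maximum fill level.  A value of 0
 * (the default) selects the maximum trigger, i.e. refill as early as the
 * batch size allows.
 */
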
#ifdef CONFIG_RFS_ACCEL

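/* Accelerated RFS (aRFS) support.  ndo_rx_flow_steer runs in atomic
 * context, but inserting a hardware filter may sleep (on some NICs it
 * involves firmware requests), so insertion requests are staged in a small
 * fixed pool of slots (efx->rps_slot, guarded by the rps_slot_map bitmap)
 * and completed asynchronously from a work item.
 */
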
static void efx_filter_rfs_work(struct work_struct *data)
{
	struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion,
							      work);
	struct efx_nic *efx = netdev_priv(req->net_dev);
	struct efx_channel *channel = efx_get_channel(efx, req->rxq_index);
	int slot_idx = req - efx->rps_slot;
	struct efx_arfs_rule *rule;
	u16 arfs_id = 0;
	int rc;

	rc = efx->type->filter_insert(efx, &req->spec, true);
	if (rc >= 0)
		rc %= efx->type->max_rx_ip_filters;
	if (efx->rps_hash_table) {
		spin_lock_bh(&efx->rps_hash_lock);
		rule = efx_rps_hash_find(efx, &req->spec);
		/* The rule might have already gone, if someone else's request
		 * for the same spec was already worked and then expired before
		 * we got around to our work.  In that case we have nothing
		 * tying us to an arfs_id, meaning that as soon as the filter
		 * is considered for expiry it will be removed.
		 */
		if (rule) {
			if (rc < 0)
				rule->filter_id = EFX_ARFS_FILTER_ID_ERROR;
			else
				rule->filter_id = rc;
			arfs_id = rule->arfs_id;
		}
		spin_unlock_bh(&efx->rps_hash_lock);
	}
	if (rc >= 0) {
		/* Remember this so we can check whether to expire the filter
		 * later.
		 */
		mutex_lock(&efx->rps_mutex);
		channel->rps_flow_id[rc] = req->flow_id;
		++channel->rfs_filters_added;
		mutex_unlock(&efx->rps_mutex);

		if (req->spec.ether_type == htons(ETH_P_IP))
			netif_info(efx, rx_status, efx->net_dev,
				   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n",
				   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				   req->spec.rem_host, ntohs(req->spec.rem_port),
				   req->spec.loc_host, ntohs(req->spec.loc_port),
				   req->rxq_index, req->flow_id, rc, arfs_id);
		else
			netif_info(efx, rx_status, efx->net_dev,
				   "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n",
				   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				   req->spec.rem_host, ntohs(req->spec.rem_port),
				   req->spec.loc_host, ntohs(req->spec.loc_port),
				   req->rxq_index, req->flow_id, rc, arfs_id);
	}

	/* Release references */
	clear_bit(slot_idx, &efx->rps_slot_map);
	dev_put(req->net_dev);
}

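/* efx_filter_rfs - request steering of a flow to a given RX queue
 *
 * This is the driver's aRFS entry point (typically wired up as the
 * net_device_ops .ndo_rx_flow_steer callback under CONFIG_RFS_ACCEL).
 * It only validates the flow and queues an insertion request for
 * efx_filter_rfs_work() above; the value returned is the arfs_id that
 * the core later hands back when asking whether the flow may expire.
 */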
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_async_filter_insertion *req;
	struct efx_arfs_rule *rule;
	struct flow_keys fk;
	int slot_idx;
	bool new;
	int rc;

	/* find a free slot */
	for (slot_idx = 0; slot_idx < EFX_RPS_MAX_IN_FLIGHT; slot_idx++)
		if (!test_and_set_bit(slot_idx, &efx->rps_slot_map))
			break;
	if (slot_idx >= EFX_RPS_MAX_IN_FLIGHT)
		return -EBUSY;

	if (flow_id == RPS_FLOW_ID_INVALID) {
		rc = -EINVAL;
		goto out_clear;
	}

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}

	if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}
	if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}

	req = efx->rps_slot + slot_idx;
	efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT,
			   efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
			   rxq_index);
	req->spec.match_flags =
		EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
		EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
	req->spec.ether_type = fk.basic.n_proto;
	req->spec.ip_proto = fk.basic.ip_proto;

	if (fk.basic.n_proto == htons(ETH_P_IP)) {
		req->spec.rem_host[0] = fk.addrs.v4addrs.src;
		req->spec.loc_host[0] = fk.addrs.v4addrs.dst;
	} else {
		memcpy(req->spec.rem_host, &fk.addrs.v6addrs.src,
		       sizeof(struct in6_addr));
		memcpy(req->spec.loc_host, &fk.addrs.v6addrs.dst,
		       sizeof(struct in6_addr));
	}

	req->spec.rem_port = fk.ports.src;
	req->spec.loc_port = fk.ports.dst;

	if (efx->rps_hash_table) {
		/* Add it to ARFS hash table */
		spin_lock(&efx->rps_hash_lock);
		rule = efx_rps_hash_add(efx, &req->spec, &new);
		if (!rule) {
			rc = -ENOMEM;
			goto out_unlock;
		}
		if (new)
			rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER;
		rc = rule->arfs_id;
		/* Skip if existing or pending filter already does the right thing */
		if (!new && rule->rxq_index == rxq_index &&
		    rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING)
			goto out_unlock;
		rule->rxq_index = rxq_index;
		rule->filter_id = EFX_ARFS_FILTER_ID_PENDING;
		spin_unlock(&efx->rps_hash_lock);
	} else {
		/* Without an ARFS hash table, we just use arfs_id 0 for all
		 * filters.  This means if multiple flows hash to the same
		 * flow_id, all but the most recently touched will be eligible
		 * for expiry.
		 */
		rc = 0;
	}

	/* Queue the request */
	req->net_dev = net_dev;
	dev_hold(req->net_dev);
	INIT_WORK(&req->work, efx_filter_rfs_work);
	req->rxq_index = rxq_index;
	req->flow_id = flow_id;
	schedule_work(&req->work);
	return rc;
out_unlock:
	spin_unlock(&efx->rps_hash_lock);
out_clear:
	clear_bit(slot_idx, &efx->rps_slot_map);
	return rc;
}

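/* Scan up to @quota filter table entries, expiring any whose flows the
 * core RPS layer reports as idle.  The scan position persists across calls
 * in rps_expire_channel/rps_expire_index, so successive invocations
 * (typically driven from the NAPI poll path) cover the whole table
 * round-robin.  Returns false if the pass was skipped because another
 * expiry pass already held rps_mutex.
 */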
bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
{
	bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
	unsigned int channel_idx, index, size;
	u32 flow_id;

	if (!mutex_trylock(&efx->rps_mutex))
		return false;
	expire_one = efx->type->filter_rfs_expire_one;
	channel_idx = efx->rps_expire_channel;
	index = efx->rps_expire_index;
	size = efx->type->max_rx_ip_filters;
	while (quota--) {
		struct efx_channel *channel = efx_get_channel(efx, channel_idx);

		flow_id = channel->rps_flow_id[index];

		if (flow_id != RPS_FLOW_ID_INVALID &&
		    expire_one(efx, flow_id, index)) {
			netif_info(efx, rx_status, efx->net_dev,
				   "expired filter %d [queue %u flow %u]\n",
				   index, channel_idx, flow_id);
			channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
		}
		if (++index == size) {
			if (++channel_idx == efx->n_channels)
				channel_idx = 0;
			index = 0;
		}
	}
	efx->rps_expire_channel = channel_idx;
	efx->rps_expire_index = index;

	mutex_unlock(&efx->rps_mutex);
	return true;
}

#endif /* CONFIG_RFS_ACCEL */

/**
 * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
 * @spec: Specification to test
 *
 * Return: %true if the specification is a non-drop RX filter that
 * matches a local MAC address I/G bit value of 1 or matches a local
 * IPv4 or IPv6 address value in the respective multicast address
 * range.  Otherwise %false.
 */
bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
{
	if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
	    spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
		return false;

	if (spec->match_flags &
	    (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
	    is_multicast_ether_addr(spec->loc_mac))
		return true;

	if ((spec->match_flags &
	     (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
	    (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
		if (spec->ether_type == htons(ETH_P_IP) &&
		    ipv4_is_multicast(spec->loc_host[0]))
			return true;
		if (spec->ether_type == htons(ETH_P_IPV6) &&
		    ((const u8 *)spec->loc_host)[0] == 0xff)
			return true;
	}

	return false;
}
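
/* Illustrative examples: a filter matching IPv4 destination 224.0.0.251
 * (mDNS) or IPv6 destination ff02::fb is a multicast recipient, since
 * 224.0.0.0/4 and ff00::/8 are the IPv4 and IPv6 multicast ranges; a
 * filter on a unicast address such as 192.0.2.1, or any drop filter, is
 * not.
 */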