/*
 * Cadence MACB/GEM Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/crc32.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
#include "macb.h"

#define MACB_RX_BUFFER_SIZE	128
#define RX_BUFFER_MULTIPLE	64	/* bytes */

#define DEFAULT_RX_RING_SIZE	512	/* must be power of 2 */
#define MIN_RX_RING_SIZE	64
#define MAX_RX_RING_SIZE	8192
#define RX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->rx_ring_size)

#define DEFAULT_TX_RING_SIZE	512	/* must be power of 2 */
#define MIN_TX_RING_SIZE	64
#define MAX_TX_RING_SIZE	4096
#define TX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->tx_ring_size)

/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH(bp)	(3 * (bp)->tx_ring_size / 4)
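/* With the default 512-entry TX ring this wakes the queue once at most
 * 384 descriptors (three quarters of the ring) remain in flight, giving
 * some hysteresis so the queue is not restarted after every single
 * reclaimed descriptor.
 */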

#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(ISR_ROVR))
#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
					| MACB_BIT(ISR_RLE)		\
					| MACB_BIT(TXERR))
#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)	\
					| MACB_BIT(TXUBR))

/* Max length of transmit frame must be a multiple of 8 bytes */
#define MACB_TX_LEN_ALIGN	8
#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
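/* Both limits take the largest value representable in the controller's
 * frame-length field and round it down to the required 8-byte multiple by
 * masking off the low three bits (for example, a 14-bit length field would
 * give 16383 & ~7 = 16376 bytes; the actual field widths come from
 * MACB_TX_FRMLEN_SIZE and GEM_TX_FRMLEN_SIZE in macb.h).
 */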

#define GEM_MTU_MIN_SIZE	ETH_MIN_MTU
#define MACB_NETIF_LSO		NETIF_F_TSO

#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
#define MACB_WOL_ENABLED		(0x1 << 1)

/* Graceful stop timeouts in us. We should allow up to
 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
 */
#define MACB_HALT_TIMEOUT	1230
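/* 1230 us is roughly one maximum-size frame on the wire at 10 Mbit/s:
 * (1518 bytes + 8 preamble + 12 inter-frame gap) * 8 bits / 10 Mbit/s
 * = 1230.4 us.
 */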

#define MACB_PM_TIMEOUT		100 /* ms */

#define MACB_MDIO_TIMEOUT	1000000 /* in usecs */

/* The DMA buffer descriptor may differ in size depending on the hardware
 * configuration:
 *
 * 1. dma address width 32 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *
 * 2. dma address width 64 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *
 * 3. dma address width 32 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: timestamp word 1
 *    word 4: timestamp word 2
 *
 * 4. dma address width 64 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *    word 5: timestamp word 1
 *    word 6: timestamp word 2
 */
static unsigned int macb_dma_desc_get_size(struct macb *bp)
{
#ifdef MACB_EXT_DESC
	unsigned int desc_size;

	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64);
		break;
	case HW_DMA_CAP_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	default:
		desc_size = sizeof(struct macb_dma_desc);
	}
	return desc_size;
#endif
	return sizeof(struct macb_dma_desc);
}

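/* Because a hardware descriptor can occupy one, two, or three basic
 * descriptor slots (see macb_dma_desc_get_size() above), a logical ring
 * index must be scaled before indexing the flat tx_ring/rx_ring array:
 * e.g. with HW_DMA_CAP_64B_PTP, logical index 5 lands in slot 15.
 */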
static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx)
{
#ifdef MACB_EXT_DESC
	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
	case HW_DMA_CAP_PTP:
		desc_idx <<= 1;
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_idx *= 3;
		break;
	default:
		break;
	}
#endif
	return desc_idx;
}

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
{
	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
		return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc));
	return NULL;
}
#endif

/* Ring buffer accessors */
static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->tx_ring_size - 1);
}
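/* The ring sizes are constrained to powers of two, so the wrap above is a
 * simple mask: with a 512-entry ring, index 512 maps back to 0. Head and
 * tail counters are left free-running and only masked on access.
 */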

static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
					  unsigned int index)
{
	index = macb_tx_ring_wrap(queue->bp, index);
	index = macb_adj_dma_desc_idx(queue->bp, index);
	return &queue->tx_ring[index];
}

static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
				       unsigned int index)
{
	return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
}

static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
{
	dma_addr_t offset;

	offset = macb_tx_ring_wrap(queue->bp, index) *
		 macb_dma_desc_get_size(queue->bp);

	return queue->tx_ring_dma + offset;
}

static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->rx_ring_size - 1);
}

static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index)
{
	index = macb_rx_ring_wrap(queue->bp, index);
	index = macb_adj_dma_desc_idx(queue->bp, index);
	return &queue->rx_ring[index];
}

static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index)
{
	return queue->rx_buffers + queue->bp->rx_buffer_size *
	       macb_rx_ring_wrap(queue->bp, index);
}

/* I/O accessors */
static u32 hw_readl_native(struct macb *bp, int offset)
{
	return __raw_readl(bp->regs + offset);
}

static void hw_writel_native(struct macb *bp, int offset, u32 value)
{
	__raw_writel(value, bp->regs + offset);
}

static u32 hw_readl(struct macb *bp, int offset)
{
	return readl_relaxed(bp->regs + offset);
}

static void hw_writel(struct macb *bp, int offset, u32 value)
{
	writel_relaxed(value, bp->regs + offset);
}
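/* Either the __raw_* pair (native endianness) or the *_relaxed pair
 * (little-endian) ends up behind the bp->macb_reg_readl/macb_reg_writel
 * function pointers, chosen at probe time based on hw_is_native_io()
 * below; see macb_update_stats() for a caller that goes through them.
 */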

/* Find the CPU endianness by using the loopback bit of the NCR register.
 * When the CPU is big-endian we need to program swapped mode for
 * management descriptor access.
 */
static bool hw_is_native_io(void __iomem *addr)
{
	u32 value = MACB_BIT(LLB);

	__raw_writel(value, addr + MACB_NCR);
	value = __raw_readl(addr + MACB_NCR);

	/* Write 0 back to disable everything */
	__raw_writel(0, addr + MACB_NCR);

	return value == MACB_BIT(LLB);
}

static bool hw_is_gem(void __iomem *addr, bool native_io)
{
	u32 id;

	if (native_io)
		id = __raw_readl(addr + MACB_MID);
	else
		id = readl_relaxed(addr + MACB_MID);

	return MACB_BFEXT(IDNUM, id) >= 0x2;
}

static void macb_set_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;

	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
	macb_or_gem_writel(bp, SA1B, bottom);
	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
	macb_or_gem_writel(bp, SA1T, top);

	/* Clear unused address register sets */
	macb_or_gem_writel(bp, SA2B, 0);
	macb_or_gem_writel(bp, SA2T, 0);
	macb_or_gem_writel(bp, SA3B, 0);
	macb_or_gem_writel(bp, SA3T, 0);
	macb_or_gem_writel(bp, SA4B, 0);
	macb_or_gem_writel(bp, SA4T, 0);
}
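/* Worked example on a little-endian CPU: for MAC address
 * 00:11:22:33:44:55, SA1B is written with 0x33221100 (first four octets)
 * and SA1T with 0x5544 (last two octets).
 */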

static void macb_get_hwaddr(struct macb *bp)
{
	struct macb_platform_data *pdata;
	u32 bottom;
	u16 top;
	u8 addr[6];
	int i;

	pdata = dev_get_platdata(&bp->pdev->dev);

	/* Check all 4 address registers for a valid address */
	for (i = 0; i < 4; i++) {
		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
		top = macb_or_gem_readl(bp, SA1T + i * 8);

		if (pdata && pdata->rev_eth_addr) {
			addr[5] = bottom & 0xff;
			addr[4] = (bottom >> 8) & 0xff;
			addr[3] = (bottom >> 16) & 0xff;
			addr[2] = (bottom >> 24) & 0xff;
			addr[1] = top & 0xff;
			addr[0] = (top & 0xff00) >> 8;
		} else {
			addr[0] = bottom & 0xff;
			addr[1] = (bottom >> 8) & 0xff;
			addr[2] = (bottom >> 16) & 0xff;
			addr[3] = (bottom >> 24) & 0xff;
			addr[4] = top & 0xff;
			addr[5] = (top >> 8) & 0xff;
		}

		if (is_valid_ether_addr(addr)) {
			memcpy(bp->dev->dev_addr, addr, sizeof(addr));
			return;
		}
	}

	dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
	eth_hw_addr_random(bp->dev);
}
326
Harini Katakam8beb79b2019-03-01 16:20:32 +0530327static int macb_mdio_wait_for_idle(struct macb *bp)
328{
329 u32 val;
330
331 return readx_poll_timeout(MACB_READ_NSR, bp, val, val & MACB_BIT(IDLE),
332 1, MACB_MDIO_TIMEOUT);
333}
334
frederic RODO6c36a702007-07-12 19:07:24 +0200335static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
Haavard Skinnemoen89e57852006-11-09 14:51:17 +0100336{
frederic RODO6c36a702007-07-12 19:07:24 +0200337 struct macb *bp = bus->priv;
Harini Katakamd54f89a2019-03-01 16:20:34 +0530338 int status;
Harini Katakam8beb79b2019-03-01 16:20:32 +0530339
Harini Katakamd54f89a2019-03-01 16:20:34 +0530340 status = pm_runtime_get_sync(&bp->pdev->dev);
341 if (status < 0)
342 goto mdio_pm_exit;
343
344 status = macb_mdio_wait_for_idle(bp);
345 if (status < 0)
346 goto mdio_read_exit;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +0100347
Haavard Skinnemoen89e57852006-11-09 14:51:17 +0100348 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
349 | MACB_BF(RW, MACB_MAN_READ)
frederic RODO6c36a702007-07-12 19:07:24 +0200350 | MACB_BF(PHYA, mii_id)
351 | MACB_BF(REGA, regnum)
Haavard Skinnemoen89e57852006-11-09 14:51:17 +0100352 | MACB_BF(CODE, MACB_MAN_CODE)));
353
Harini Katakamd54f89a2019-03-01 16:20:34 +0530354 status = macb_mdio_wait_for_idle(bp);
355 if (status < 0)
356 goto mdio_read_exit;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +0100357
Harini Katakamd54f89a2019-03-01 16:20:34 +0530358 status = MACB_BFEXT(DATA, macb_readl(bp, MAN));
Haavard Skinnemoen89e57852006-11-09 14:51:17 +0100359
Harini Katakamd54f89a2019-03-01 16:20:34 +0530360mdio_read_exit:
361 pm_runtime_mark_last_busy(&bp->pdev->dev);
362 pm_runtime_put_autosuspend(&bp->pdev->dev);
363mdio_pm_exit:
364 return status;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +0100365}
366
frederic RODO6c36a702007-07-12 19:07:24 +0200367static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
368 u16 value)
Haavard Skinnemoen89e57852006-11-09 14:51:17 +0100369{
frederic RODO6c36a702007-07-12 19:07:24 +0200370 struct macb *bp = bus->priv;
Harini Katakamd54f89a2019-03-01 16:20:34 +0530371 int status;
Harini Katakam8beb79b2019-03-01 16:20:32 +0530372
Harini Katakamd54f89a2019-03-01 16:20:34 +0530373 status = pm_runtime_get_sync(&bp->pdev->dev);
374 if (status < 0)
375 goto mdio_pm_exit;
376
377 status = macb_mdio_wait_for_idle(bp);
378 if (status < 0)
379 goto mdio_write_exit;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +0100380
381 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
382 | MACB_BF(RW, MACB_MAN_WRITE)
frederic RODO6c36a702007-07-12 19:07:24 +0200383 | MACB_BF(PHYA, mii_id)
384 | MACB_BF(REGA, regnum)
Haavard Skinnemoen89e57852006-11-09 14:51:17 +0100385 | MACB_BF(CODE, MACB_MAN_CODE)
frederic RODO6c36a702007-07-12 19:07:24 +0200386 | MACB_BF(DATA, value)));
Haavard Skinnemoen89e57852006-11-09 14:51:17 +0100387
Harini Katakamd54f89a2019-03-01 16:20:34 +0530388 status = macb_mdio_wait_for_idle(bp);
389 if (status < 0)
390 goto mdio_write_exit;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +0100391
Harini Katakamd54f89a2019-03-01 16:20:34 +0530392mdio_write_exit:
393 pm_runtime_mark_last_busy(&bp->pdev->dev);
394 pm_runtime_put_autosuspend(&bp->pdev->dev);
395mdio_pm_exit:
396 return status;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +0100397}
398
Soren Brinkmanne1824df2013-12-10 16:07:23 -0800399/**
400 * macb_set_tx_clk() - Set a clock to a new frequency
401 * @clk Pointer to the clock to change
402 * @rate New frequency in Hz
403 * @dev Pointer to the struct net_device
404 */
405static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
406{
407 long ferr, rate, rate_rounded;
408
Cyrille Pitchen93b31f42015-03-07 07:23:31 +0100409 if (!clk)
410 return;
411
Soren Brinkmanne1824df2013-12-10 16:07:23 -0800412 switch (speed) {
413 case SPEED_10:
414 rate = 2500000;
415 break;
416 case SPEED_100:
417 rate = 25000000;
418 break;
419 case SPEED_1000:
420 rate = 125000000;
421 break;
422 default:
Soren Brinkmann9319e472013-12-10 20:57:57 -0800423 return;
Soren Brinkmanne1824df2013-12-10 16:07:23 -0800424 }
425
426 rate_rounded = clk_round_rate(clk, rate);
427 if (rate_rounded < 0)
428 return;
429
430 /* RGMII allows 50 ppm frequency error. Test and warn if this limit
431 * is not satisfied.
432 */
433 ferr = abs(rate_rounded - rate);
434 ferr = DIV_ROUND_UP(ferr, rate / 100000);
435 if (ferr > 5)
436 netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
Moritz Fischeraa50b552016-03-29 19:11:13 -0700437 rate);
Soren Brinkmanne1824df2013-12-10 16:07:23 -0800438
439 if (clk_set_rate(clk, rate_rounded))
440 netdev_err(dev, "adjusting tx_clk failed.\n");
441}
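/* A worked example of the 50 ppm check above: ferr is computed in units
 * of 10 ppm (rounded up), so a 124999000 Hz rounded rate against the
 * 125 MHz target gives ferr = DIV_ROUND_UP(1000, 1250) = 1, i.e. 8 ppm,
 * which passes; only errors above 50 ppm (ferr > 5) trigger the warning.
 */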

static void macb_handle_link_change(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned long flags;
	int status_change = 0;

	spin_lock_irqsave(&bp->lock, flags);

	if (phydev->link) {
		if ((bp->speed != phydev->speed) ||
		    (bp->duplex != phydev->duplex)) {
			u32 reg;

			reg = macb_readl(bp, NCFGR);
			reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
			if (macb_is_gem(bp))
				reg &= ~GEM_BIT(GBE);

			if (phydev->duplex)
				reg |= MACB_BIT(FD);
			if (phydev->speed == SPEED_100)
				reg |= MACB_BIT(SPD);
			if (phydev->speed == SPEED_1000 &&
			    bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
				reg |= GEM_BIT(GBE);

			macb_or_gem_writel(bp, NCFGR, reg);

			bp->speed = phydev->speed;
			bp->duplex = phydev->duplex;
			status_change = 1;
		}
	}

	if (phydev->link != bp->link) {
		if (!phydev->link) {
			bp->speed = 0;
			bp->duplex = -1;
		}
		bp->link = phydev->link;

		status_change = 1;
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (status_change) {
		if (phydev->link) {
			/* Update the TX clock rate if and only if the link is
			 * up and there has been a link change.
			 */
			macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);

			netif_carrier_on(dev);
			netdev_info(dev, "link up (%d/%s)\n",
				    phydev->speed,
				    phydev->duplex == DUPLEX_FULL ?
				    "Full" : "Half");
		} else {
			netif_carrier_off(dev);
			netdev_info(dev, "link down\n");
		}
	}
}
508
509/* based on au1000_eth. c*/
510static int macb_mii_probe(struct net_device *dev)
511{
512 struct macb *bp = netdev_priv(dev);
Joachim Eastwood2dbfdbb92012-11-11 13:56:27 +0000513 struct macb_platform_data *pdata;
Jiri Pirko7455a762010-02-08 05:12:08 +0000514 struct phy_device *phydev;
Brad Mouring739de9a2018-03-13 16:32:13 -0500515 struct device_node *np;
516 int phy_irq, ret, i;
517
518 pdata = dev_get_platdata(&bp->pdev->dev);
519 np = bp->pdev->dev.of_node;
520 ret = 0;
521
522 if (np) {
523 if (of_phy_is_fixed_link(np)) {
Brad Mouring739de9a2018-03-13 16:32:13 -0500524 bp->phy_node = of_node_get(np);
525 } else {
Brad Mouring2105a5d2018-03-13 16:32:15 -0500526 bp->phy_node = of_parse_phandle(np, "phy-handle", 0);
527 /* fallback to standard phy registration if no
528 * phy-handle was found nor any phy found during
529 * dt phy registration
Brad Mouring739de9a2018-03-13 16:32:13 -0500530 */
Brad Mouring2105a5d2018-03-13 16:32:15 -0500531 if (!bp->phy_node && !phy_find_first(bp->mii_bus)) {
Brad Mouring739de9a2018-03-13 16:32:13 -0500532 for (i = 0; i < PHY_MAX_ADDR; i++) {
533 struct phy_device *phydev;
534
535 phydev = mdiobus_scan(bp->mii_bus, i);
536 if (IS_ERR(phydev) &&
537 PTR_ERR(phydev) != -ENODEV) {
538 ret = PTR_ERR(phydev);
539 break;
540 }
541 }
542
543 if (ret)
544 return -ENODEV;
545 }
546 }
547 }
frederic RODO6c36a702007-07-12 19:07:24 +0200548
Michael Grzeschikdacdbb42017-06-23 16:54:10 +0200549 if (bp->phy_node) {
550 phydev = of_phy_connect(dev, bp->phy_node,
551 &macb_handle_link_change, 0,
552 bp->phy_interface);
553 if (!phydev)
554 return -ENODEV;
555 } else {
556 phydev = phy_find_first(bp->mii_bus);
557 if (!phydev) {
558 netdev_err(dev, "no PHY found\n");
559 return -ENXIO;
Joachim Eastwood2dbfdbb92012-11-11 13:56:27 +0000560 }
frederic RODO6c36a702007-07-12 19:07:24 +0200561
Michael Grzeschikdacdbb42017-06-23 16:54:10 +0200562 if (pdata) {
563 if (gpio_is_valid(pdata->phy_irq_pin)) {
564 ret = devm_gpio_request(&bp->pdev->dev,
565 pdata->phy_irq_pin, "phy int");
566 if (!ret) {
567 phy_irq = gpio_to_irq(pdata->phy_irq_pin);
568 phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
569 }
570 } else {
571 phydev->irq = PHY_POLL;
572 }
573 }
574
575 /* attach the mac to the phy */
576 ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
577 bp->phy_interface);
578 if (ret) {
579 netdev_err(dev, "Could not attach to PHY\n");
580 return ret;
581 }
frederic RODO6c36a702007-07-12 19:07:24 +0200582 }
Haavard Skinnemoen89e57852006-11-09 14:51:17 +0100583
frederic RODO6c36a702007-07-12 19:07:24 +0200584 /* mask with MAC supported features */
Nicolas Ferree1755872014-07-24 13:50:58 +0200585 if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
Andrew Lunn58056c12018-09-12 01:53:11 +0200586 phy_set_max_speed(phydev, SPEED_1000);
Patrice Vilchez140b7552012-10-31 06:04:50 +0000587 else
Andrew Lunn58056c12018-09-12 01:53:11 +0200588 phy_set_max_speed(phydev, SPEED_100);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +0100589
Nathan Sullivan222ca8e2015-05-22 09:22:10 -0500590 if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF)
Andrew Lunn41124fa2018-09-12 01:53:14 +0200591 phy_remove_link_mode(phydev,
592 ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +0100593
frederic RODO6c36a702007-07-12 19:07:24 +0200594 bp->link = 0;
595 bp->speed = 0;
596 bp->duplex = -1;
frederic RODO6c36a702007-07-12 19:07:24 +0200597
598 return 0;
599}
600
static int macb_mii_init(struct macb *bp)
{
	struct macb_platform_data *pdata;
	struct device_node *np;
	int err = -ENXIO;

	/* Enable management port */
	macb_writel(bp, NCR, MACB_BIT(MPE));

	bp->mii_bus = mdiobus_alloc();
	if (!bp->mii_bus) {
		err = -ENOMEM;
		goto err_out;
	}

	bp->mii_bus->name = "MACB_mii_bus";
	bp->mii_bus->read = &macb_mdio_read;
	bp->mii_bus->write = &macb_mdio_write;
	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 bp->pdev->name, bp->pdev->id);
	bp->mii_bus->priv = bp;
	bp->mii_bus->parent = &bp->pdev->dev;
	pdata = dev_get_platdata(&bp->pdev->dev);

	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);

	np = bp->pdev->dev.of_node;
	if (np && of_phy_is_fixed_link(np)) {
		if (of_phy_register_fixed_link(np) < 0) {
			dev_err(&bp->pdev->dev,
				"broken fixed-link specification %pOF\n", np);
			goto err_out_free_mdiobus;
		}

		err = mdiobus_register(bp->mii_bus);
	} else {
		if (pdata)
			bp->mii_bus->phy_mask = pdata->phy_mask;

		err = of_mdiobus_register(bp->mii_bus, np);
	}

	if (err)
		goto err_out_free_fixed_link;

	err = macb_mii_probe(bp->dev);
	if (err)
		goto err_out_unregister_bus;

	return 0;

err_out_unregister_bus:
	mdiobus_unregister(bp->mii_bus);
err_out_free_fixed_link:
	if (np && of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
err_out_free_mdiobus:
	of_node_put(bp->phy_node);
	mdiobus_free(bp->mii_bus);
err_out:
	return err;
}

static void macb_update_stats(struct macb *bp)
{
	u32 *p = &bp->hw_stats.macb.rx_pause_frames;
	u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
	int offset = MACB_PFR;

	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);

	for (; p < end; p++, offset += 4)
		*p += bp->macb_reg_readl(bp, offset);
}
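/* The loop above walks the contiguous register block from MACB_PFR up to
 * and including MACB_TPF, accumulating each counter into the matching
 * field of hw_stats.macb; the WARN_ON checks that the struct layout still
 * mirrors the register map.
 */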

static int macb_halt_tx(struct macb *bp)
{
	unsigned long halt_time, timeout;
	u32 status;

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));

	timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
	do {
		halt_time = jiffies;
		status = macb_readl(bp, TSR);
		if (!(status & MACB_BIT(TGO)))
			return 0;

		udelay(250);
	} while (time_before(halt_time, timeout));

	return -ETIMEDOUT;
}

static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
{
	if (tx_skb->mapping) {
		if (tx_skb->mapped_as_page)
			dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
				       tx_skb->size, DMA_TO_DEVICE);
		else
			dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
					 tx_skb->size, DMA_TO_DEVICE);
		tx_skb->mapping = 0;
	}

	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
	}
}

static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	struct macb_dma_desc_64 *desc_64;

	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
		desc_64 = macb_64b_desc(bp, desc);
		desc_64->addrh = upper_32_bits(addr);
		/* The low bits of RX address contain the RX_USED bit, clearing
		 * of which allows packet RX. Make sure the high bits are also
		 * visible to HW at that point.
		 */
		dma_wmb();
	}
#endif
	desc->addr = lower_32_bits(addr);
}

static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
{
	dma_addr_t addr = 0;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	struct macb_dma_desc_64 *desc_64;

	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
		desc_64 = macb_64b_desc(bp, desc);
		addr = ((u64)(desc_64->addrh) << 32);
	}
#endif
	addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
	return addr;
}

static void macb_tx_error_task(struct work_struct *work)
{
	struct macb_queue *queue = container_of(work, struct macb_queue,
						tx_error_task);
	struct macb *bp = queue->bp;
	struct macb_tx_skb *tx_skb;
	struct macb_dma_desc *desc;
	struct sk_buff *skb;
	unsigned int tail;
	unsigned long flags;

	netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
		    (unsigned int)(queue - bp->queues),
		    queue->tx_tail, queue->tx_head);

	/* Prevent the queue IRQ handlers from running: each of them may call
	 * macb_tx_interrupt(), which in turn may call netif_wake_subqueue().
	 * As explained below, we have to halt the transmission before updating
	 * TBQP registers so we call netif_tx_stop_all_queues() to notify the
	 * network engine about the macb/gem being halted.
	 */
	spin_lock_irqsave(&bp->lock, flags);

	/* Make sure nobody is trying to queue up new packets */
	netif_tx_stop_all_queues(bp->dev);

	/* Stop transmission now
	 * (in case we have just queued new packets)
	 * macb/gem must be halted to write TBQP register
	 */
	if (macb_halt_tx(bp))
		/* Just complain for now, reinitializing TX path can be good */
		netdev_err(bp->dev, "BUG: halt tx timed out\n");

	/* Treat frames in TX queue including the ones that caused the error.
	 * Free transmit buffers in upper layer.
	 */
	for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
		u32 ctrl;

		desc = macb_tx_desc(queue, tail);
		ctrl = desc->ctrl;
		tx_skb = macb_tx_skb(queue, tail);
		skb = tx_skb->skb;

		if (ctrl & MACB_BIT(TX_USED)) {
			/* skb is set for the last buffer of the frame */
			while (!skb) {
				macb_tx_unmap(bp, tx_skb);
				tail++;
				tx_skb = macb_tx_skb(queue, tail);
				skb = tx_skb->skb;
			}

			/* ctrl still refers to the first buffer descriptor
			 * since it's the only one written back by the hardware
			 */
			if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
				netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
					    macb_tx_ring_wrap(bp, tail),
					    skb->data);
				bp->dev->stats.tx_packets++;
				queue->stats.tx_packets++;
				bp->dev->stats.tx_bytes += skb->len;
				queue->stats.tx_bytes += skb->len;
			}
		} else {
			/* "Buffers exhausted mid-frame" errors may only happen
			 * if the driver is buggy, so complain loudly about
			 * those. Statistics are updated by hardware.
			 */
			if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
				netdev_err(bp->dev,
					   "BUG: TX buffers exhausted mid-frame\n");

			desc->ctrl = ctrl | MACB_BIT(TX_USED);
		}

		macb_tx_unmap(bp, tx_skb);
	}

	/* Set end of TX queue */
	desc = macb_tx_desc(queue, 0);
	macb_set_addr(bp, desc, 0);
	desc->ctrl = MACB_BIT(TX_USED);

	/* Make descriptor updates visible to hardware */
	wmb();

	/* Reinitialize the TX desc queue */
	queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
		queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
#endif
	/* Make TX ring reflect state of hardware */
	queue->tx_head = 0;
	queue->tx_tail = 0;

	/* Housework before enabling TX IRQ */
	macb_writel(bp, TSR, macb_readl(bp, TSR));
	queue_writel(queue, IER, MACB_TX_INT_FLAGS);

	/* Now we are ready to start transmission again */
	netif_tx_start_all_queues(bp->dev);
	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	spin_unlock_irqrestore(&bp->lock, flags);
}

static void macb_tx_interrupt(struct macb_queue *queue)
{
	unsigned int tail;
	unsigned int head;
	u32 status;
	struct macb *bp = queue->bp;
	u16 queue_index = queue - bp->queues;

	status = macb_readl(bp, TSR);
	macb_writel(bp, TSR, status);

	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
		queue_writel(queue, ISR, MACB_BIT(TCOMP));

	netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
		    (unsigned long)status);

	head = queue->tx_head;
	for (tail = queue->tx_tail; tail != head; tail++) {
		struct macb_tx_skb *tx_skb;
		struct sk_buff *skb;
		struct macb_dma_desc *desc;
		u32 ctrl;

		desc = macb_tx_desc(queue, tail);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		ctrl = desc->ctrl;

		/* TX_USED bit is only set by hardware on the very first buffer
		 * descriptor of the transmitted frame.
		 */
		if (!(ctrl & MACB_BIT(TX_USED)))
			break;

		/* Process all buffers of the current transmitted frame */
		for (;; tail++) {
			tx_skb = macb_tx_skb(queue, tail);
			skb = tx_skb->skb;

			/* First, update TX stats if needed */
			if (skb) {
				if (gem_ptp_do_txstamp(queue, skb, desc) == 0) {
					/* skb now belongs to timestamp buffer
					 * and will be removed later
					 */
					tx_skb->skb = NULL;
				}
				netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
					    macb_tx_ring_wrap(bp, tail),
					    skb->data);
				bp->dev->stats.tx_packets++;
				queue->stats.tx_packets++;
				bp->dev->stats.tx_bytes += skb->len;
				queue->stats.tx_bytes += skb->len;
			}

			/* Now we can safely release resources */
			macb_tx_unmap(bp, tx_skb);

			/* skb is set only for the last buffer of the frame.
			 * WARNING: at this point skb has been freed by
			 * macb_tx_unmap().
			 */
			if (skb)
				break;
		}
	}

	queue->tx_tail = tail;
	if (__netif_subqueue_stopped(bp->dev, queue_index) &&
	    CIRC_CNT(queue->tx_head, queue->tx_tail,
		     bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
		netif_wake_subqueue(bp->dev, queue_index);
}
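/* Note the hysteresis pairing with the hot path above: the subqueue is
 * only woken once the in-flight count, CIRC_CNT(tx_head, tx_tail, ring
 * size), has dropped to MACB_TX_WAKEUP_THRESH (three quarters of the
 * ring), so a briefly full ring does not cause a stop/wake storm.
 */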

static void gem_rx_refill(struct macb_queue *queue)
{
	unsigned int entry;
	struct sk_buff *skb;
	dma_addr_t paddr;
	struct macb *bp = queue->bp;
	struct macb_dma_desc *desc;

	while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
			  bp->rx_ring_size) > 0) {
		entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		queue->rx_prepared_head++;
		desc = macb_rx_desc(queue, entry);

		if (!queue->rx_skbuff[entry]) {
			/* allocate sk_buff for this free entry in ring */
			skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
			if (unlikely(!skb)) {
				netdev_err(bp->dev,
					   "Unable to allocate sk_buff\n");
				break;
			}

			/* now fill corresponding descriptor entry */
			paddr = dma_map_single(&bp->pdev->dev, skb->data,
					       bp->rx_buffer_size,
					       DMA_FROM_DEVICE);
			if (dma_mapping_error(&bp->pdev->dev, paddr)) {
				dev_kfree_skb(skb);
				break;
			}

			queue->rx_skbuff[entry] = skb;

			if (entry == bp->rx_ring_size - 1)
				paddr |= MACB_BIT(RX_WRAP);
			desc->ctrl = 0;
			/* Setting addr clears RX_USED and allows reception,
			 * make sure ctrl is cleared first to avoid a race.
			 */
			dma_wmb();
			macb_set_addr(bp, desc, paddr);

			/* properly align Ethernet header */
			skb_reserve(skb, NET_IP_ALIGN);
		} else {
			desc->ctrl = 0;
			dma_wmb();
			desc->addr &= ~MACB_BIT(RX_USED);
		}
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n",
		    queue, queue->rx_prepared_head, queue->rx_tail);
}
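/* Descriptor ownership is carried by the RX_USED bit in the address word:
 * the refill path above clears it (via macb_set_addr() or the else
 * branch) to hand the slot back to hardware, after first zeroing ctrl and
 * issuing dma_wmb() so the controller can never see a stale control word
 * next to a ready address.
 */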

/* Mark DMA descriptors from begin up to and not including end as unused */
static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
				  unsigned int end)
{
	unsigned int frag;

	for (frag = begin; frag != end; frag++) {
		struct macb_dma_desc *desc = macb_rx_desc(queue, frag);

		desc->addr &= ~MACB_BIT(RX_USED);
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	/* When this happens, the hardware stats registers for
	 * whatever caused this are updated, so we don't have to record
	 * anything.
	 */
}

static int gem_rx(struct macb_queue *queue, int budget)
{
	struct macb *bp = queue->bp;
	unsigned int len;
	unsigned int entry;
	struct sk_buff *skb;
	struct macb_dma_desc *desc;
	int count = 0;

	while (count < budget) {
		u32 ctrl;
		dma_addr_t addr;
		bool rxused;

		entry = macb_rx_ring_wrap(bp, queue->rx_tail);
		desc = macb_rx_desc(queue, entry);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
		addr = macb_get_addr(bp, desc);

		if (!rxused)
			break;

		/* Ensure ctrl is at least as up-to-date as rxused */
		dma_rmb();

		ctrl = desc->ctrl;

		queue->rx_tail++;
		count++;

		if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
			netdev_err(bp->dev,
				   "not whole frame pointed by descriptor\n");
			bp->dev->stats.rx_dropped++;
			queue->stats.rx_dropped++;
			break;
		}
		skb = queue->rx_skbuff[entry];
		if (unlikely(!skb)) {
			netdev_err(bp->dev,
				   "inconsistent Rx descriptor chain\n");
			bp->dev->stats.rx_dropped++;
			queue->stats.rx_dropped++;
			break;
		}
		/* now everything is ready for receiving packet */
		queue->rx_skbuff[entry] = NULL;
		len = ctrl & bp->rx_frm_len_mask;

		netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);

		skb_put(skb, len);
		dma_unmap_single(&bp->pdev->dev, addr,
				 bp->rx_buffer_size, DMA_FROM_DEVICE);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb_checksum_none_assert(skb);
		if (bp->dev->features & NETIF_F_RXCSUM &&
		    !(bp->dev->flags & IFF_PROMISC) &&
		    GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		bp->dev->stats.rx_packets++;
		queue->stats.rx_packets++;
		bp->dev->stats.rx_bytes += skb->len;
		queue->stats.rx_bytes += skb->len;

		gem_ptp_do_rxstamp(bp, skb, desc);

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
		netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
			    skb->len, skb->csum);
		print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb_mac_header(skb), 16, true);
		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb->data, 32, true);
#endif

		netif_receive_skb(skb);
	}

	gem_rx_refill(queue);

	return count;
}

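/* The MACB (non-GEM) receive path below differs from gem_rx() above:
 * a frame may span several small RX buffers, so the fragments are copied
 * into one freshly allocated skb instead of handing the DMA buffer's skb
 * straight up the stack.
 */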
static int macb_rx_frame(struct macb_queue *queue, unsigned int first_frag,
			 unsigned int last_frag)
{
	unsigned int len;
	unsigned int frag;
	unsigned int offset;
	struct sk_buff *skb;
	struct macb_dma_desc *desc;
	struct macb *bp = queue->bp;

	desc = macb_rx_desc(queue, last_frag);
	len = desc->ctrl & bp->rx_frm_len_mask;

	netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
		    macb_rx_ring_wrap(bp, first_frag),
		    macb_rx_ring_wrap(bp, last_frag), len);

	/* The ethernet header starts NET_IP_ALIGN bytes into the
	 * first buffer. Since the header is 14 bytes, this makes the
	 * payload word-aligned.
	 *
	 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
	 * the two padding bytes into the skb so that we avoid hitting
	 * the slowpath in memcpy(), and pull them off afterwards.
	 */
	skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
	if (!skb) {
		bp->dev->stats.rx_dropped++;
		for (frag = first_frag; ; frag++) {
			desc = macb_rx_desc(queue, frag);
			desc->addr &= ~MACB_BIT(RX_USED);
			if (frag == last_frag)
				break;
		}

		/* Make descriptor updates visible to hardware */
		wmb();

		return 1;
	}

	offset = 0;
	len += NET_IP_ALIGN;
	skb_checksum_none_assert(skb);
	skb_put(skb, len);

	for (frag = first_frag; ; frag++) {
		unsigned int frag_len = bp->rx_buffer_size;

		if (offset + frag_len > len) {
			if (unlikely(frag != last_frag)) {
				dev_kfree_skb_any(skb);
				return -1;
			}
			frag_len = len - offset;
		}
		skb_copy_to_linear_data_offset(skb, offset,
					       macb_rx_buffer(queue, frag),
					       frag_len);
		offset += bp->rx_buffer_size;
		desc = macb_rx_desc(queue, frag);
		desc->addr &= ~MACB_BIT(RX_USED);

		if (frag == last_frag)
			break;
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	__skb_pull(skb, NET_IP_ALIGN);
	skb->protocol = eth_type_trans(skb, bp->dev);

	bp->dev->stats.rx_packets++;
	bp->dev->stats.rx_bytes += skb->len;
	netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
		    skb->len, skb->csum);
	netif_receive_skb(skb);

	return 0;
}

Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001191static inline void macb_init_rx_ring(struct macb_queue *queue)
Cyrille Pitchen9ba723b2016-03-25 10:37:34 +01001192{
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001193 struct macb *bp = queue->bp;
Cyrille Pitchen9ba723b2016-03-25 10:37:34 +01001194 dma_addr_t addr;
Rafal Ozieblodc97a892017-01-27 15:08:20 +00001195 struct macb_dma_desc *desc = NULL;
Cyrille Pitchen9ba723b2016-03-25 10:37:34 +01001196 int i;
1197
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001198 addr = queue->rx_buffers_dma;
Zach Brownb410d132016-10-19 09:56:57 -05001199 for (i = 0; i < bp->rx_ring_size; i++) {
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001200 desc = macb_rx_desc(queue, i);
Rafal Ozieblodc97a892017-01-27 15:08:20 +00001201 macb_set_addr(bp, desc, addr);
1202 desc->ctrl = 0;
Cyrille Pitchen9ba723b2016-03-25 10:37:34 +01001203 addr += bp->rx_buffer_size;
1204 }
Rafal Ozieblodc97a892017-01-27 15:08:20 +00001205 desc->addr |= MACB_BIT(RX_WRAP);
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001206 queue->rx_tail = 0;
Cyrille Pitchen9ba723b2016-03-25 10:37:34 +01001207}
1208
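/* MACB (non-GEM) receive path: walk the ring from rx_tail and assemble
 * each frame from its RX_SOF..RX_EOF fragments with macb_rx_frame(). An
 * EOF without a matching SOF, or a frame that overruns its fragments,
 * marks the ring as corrupt; it is then rebuilt with the receiver
 * briefly disabled. Returns the number of frames passed to the stack,
 * at most @budget.
 */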
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001209static int macb_rx(struct macb_queue *queue, int budget)
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001210{
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001211 struct macb *bp = queue->bp;
Cyrille Pitchen9ba723b2016-03-25 10:37:34 +01001212 bool reset_rx_queue = false;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001213 int received = 0;
Havard Skinnemoen55054a12012-10-31 06:04:55 +00001214 unsigned int tail;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001215 int first_frag = -1;
1216
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001217 for (tail = queue->rx_tail; budget > 0; tail++) {
1218 struct macb_dma_desc *desc = macb_rx_desc(queue, tail);
Rafal Ozieblodc97a892017-01-27 15:08:20 +00001219 u32 ctrl;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001220
Havard Skinnemoen03dbe052012-10-31 06:04:51 +00001221 /* Make hw descriptor updates visible to CPU */
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001222 rmb();
Havard Skinnemoen03dbe052012-10-31 06:04:51 +00001223
Rafal Ozieblodc97a892017-01-27 15:08:20 +00001224 if (!(desc->addr & MACB_BIT(RX_USED)))
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001225 break;
1226
Anssi Hannula6e0af292018-12-17 15:05:41 +02001227 /* Ensure ctrl is at least as up-to-date as addr */
1228 dma_rmb();
1229
1230 ctrl = desc->ctrl;
1231
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001232 if (ctrl & MACB_BIT(RX_SOF)) {
1233 if (first_frag != -1)
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001234 discard_partial_frame(queue, first_frag, tail);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001235 first_frag = tail;
1236 }
1237
1238 if (ctrl & MACB_BIT(RX_EOF)) {
1239 int dropped;
Cyrille Pitchen9ba723b2016-03-25 10:37:34 +01001240
1241 if (unlikely(first_frag == -1)) {
1242 reset_rx_queue = true;
1243 continue;
1244 }
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001245
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001246 dropped = macb_rx_frame(queue, first_frag, tail);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001247 first_frag = -1;
Cyrille Pitchen9ba723b2016-03-25 10:37:34 +01001248 if (unlikely(dropped < 0)) {
1249 reset_rx_queue = true;
1250 continue;
1251 }
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001252 if (!dropped) {
1253 received++;
1254 budget--;
1255 }
1256 }
1257 }
1258
Cyrille Pitchen9ba723b2016-03-25 10:37:34 +01001259 if (unlikely(reset_rx_queue)) {
1260 unsigned long flags;
1261 u32 ctrl;
1262
1263		netdev_err(bp->dev, "RX queue corruption: resetting it\n");
1264
1265 spin_lock_irqsave(&bp->lock, flags);
1266
1267 ctrl = macb_readl(bp, NCR);
1268 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
1269
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001270 macb_init_rx_ring(queue);
1271 queue_writel(queue, RBQP, queue->rx_ring_dma);
Cyrille Pitchen9ba723b2016-03-25 10:37:34 +01001272
1273 macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
1274
1275 spin_unlock_irqrestore(&bp->lock, flags);
1276 return received;
1277 }
1278
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001279 if (first_frag != -1)
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001280 queue->rx_tail = first_frag;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001281 else
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001282 queue->rx_tail = tail;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001283
1284 return received;
1285}
1286
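/* NAPI poll callback: acknowledge RSR, let mog_rx() consume up to
 * @budget frames, then re-enable RX interrupts once the ring is empty.
 * RSR is re-read before completing so a packet that slipped in while
 * interrupts were masked reschedules the poll instead of being stranded.
 */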
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001287static int macb_poll(struct napi_struct *napi, int budget)
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001288{
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001289 struct macb_queue *queue = container_of(napi, struct macb_queue, napi);
1290 struct macb *bp = queue->bp;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001291 int work_done;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001292 u32 status;
1293
1294 status = macb_readl(bp, RSR);
1295 macb_writel(bp, RSR, status);
1296
Havard Skinnemoena268adb2012-10-31 06:04:52 +00001297 netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
Moritz Fischeraa50b552016-03-29 19:11:13 -07001298 (unsigned long)status, budget);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001299
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001300 work_done = bp->macbgem_ops.mog_rx(queue, budget);
Joshua Hokeb3363692010-10-25 01:44:22 +00001301 if (work_done < budget) {
Eric Dumazet6ad20162017-01-30 08:22:01 -08001302 napi_complete_done(napi, work_done);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001303
Nicolas Ferre8770e912013-02-12 11:08:48 +01001304 /* Packets received while interrupts were disabled */
1305 status = macb_readl(bp, RSR);
Soren Brinkmann504ad982014-05-04 15:43:01 -07001306 if (status) {
Soren Brinkmann02f7a342014-05-04 15:43:00 -07001307 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001308 queue_writel(queue, ISR, MACB_BIT(RCOMP));
Nicolas Ferre8770e912013-02-12 11:08:48 +01001309 napi_reschedule(napi);
Soren Brinkmann02f7a342014-05-04 15:43:00 -07001310 } else {
Harini Katakame5010702019-01-29 15:20:03 +05301311 queue_writel(queue, IER, bp->rx_intr_mask);
Soren Brinkmann02f7a342014-05-04 15:43:00 -07001312 }
Joshua Hokeb3363692010-10-25 01:44:22 +00001313 }
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001314
1315 /* TODO: Handle errors */
1316
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001317 return work_done;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001318}
1319
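/* Tasklet scheduled from the interrupt handler on an HRESP (bus error)
 * event: mask interrupts, stop RX/TX, rebuild all descriptor rings,
 * reprogram the ring base registers and bring the controller back up.
 */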
Harini Katakam032dc412018-01-27 12:09:01 +05301320static void macb_hresp_error_task(unsigned long data)
1321{
1322 struct macb *bp = (struct macb *)data;
1323 struct net_device *dev = bp->dev;
1324 struct macb_queue *queue = bp->queues;
1325 unsigned int q;
1326 u32 ctrl;
1327
1328 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
Harini Katakame5010702019-01-29 15:20:03 +05301329 queue_writel(queue, IDR, bp->rx_intr_mask |
Harini Katakam032dc412018-01-27 12:09:01 +05301330 MACB_TX_INT_FLAGS |
1331 MACB_BIT(HRESP));
1332 }
1333 ctrl = macb_readl(bp, NCR);
1334 ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
1335 macb_writel(bp, NCR, ctrl);
1336
1337 netif_tx_stop_all_queues(dev);
1338 netif_carrier_off(dev);
1339
1340 bp->macbgem_ops.mog_init_rings(bp);
1341
1342 /* Initialize TX and RX buffers */
1343 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1344 queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
1345#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1346 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
1347 queue_writel(queue, RBQPH,
1348 upper_32_bits(queue->rx_ring_dma));
1349#endif
1350 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
1351#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1352 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
1353 queue_writel(queue, TBQPH,
1354 upper_32_bits(queue->tx_ring_dma));
1355#endif
1356
1357 /* Enable interrupts */
1358 queue_writel(queue, IER,
Harini Katakame5010702019-01-29 15:20:03 +05301359 bp->rx_intr_mask |
Harini Katakam032dc412018-01-27 12:09:01 +05301360 MACB_TX_INT_FLAGS |
1361 MACB_BIT(HRESP));
1362 }
1363
1364 ctrl |= MACB_BIT(RE) | MACB_BIT(TE);
1365 macb_writel(bp, NCR, ctrl);
1366
1367 netif_carrier_on(dev);
1368 netif_tx_start_all_queues(dev);
1369}
1370
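/* Restart transmission after a TX "used bit read" (TXUBR) event: if
 * descriptors are still pending between tx_tail and tx_head, setting
 * TSTART makes the DMA fetch them again.
 */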
Claudiu Beznea42983882018-12-17 10:02:42 +00001371static void macb_tx_restart(struct macb_queue *queue)
1372{
1373 unsigned int head = queue->tx_head;
1374 unsigned int tail = queue->tx_tail;
1375 struct macb *bp = queue->bp;
1376
1377 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1378 queue_writel(queue, ISR, MACB_BIT(TXUBR));
1379
1380 if (head == tail)
1381 return;
1382
1383 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
1384}
1385
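/* Per-queue interrupt handler. RX completions only schedule NAPI, with
 * RX interrupts masked until the poll has drained the ring; TX errors
 * are deferred to the queue's tx_error_task; TX completions are reaped
 * inline. ISR is re-read at the bottom of the loop until no events
 * remain.
 */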
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001386static irqreturn_t macb_interrupt(int irq, void *dev_id)
1387{
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001388 struct macb_queue *queue = dev_id;
1389 struct macb *bp = queue->bp;
1390 struct net_device *dev = bp->dev;
Nathan Sullivanbfbb92c2015-05-05 15:00:25 -05001391 u32 status, ctrl;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001392
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001393 status = queue_readl(queue, ISR);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001394
1395 if (unlikely(!status))
1396 return IRQ_NONE;
1397
1398 spin_lock(&bp->lock);
1399
1400 while (status) {
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001401 /* close possible race with dev_close */
1402 if (unlikely(!netif_running(dev))) {
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001403 queue_writel(queue, IDR, -1);
Nathan Sullivan24468372016-01-14 13:27:27 -06001404 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1405 queue_writel(queue, ISR, -1);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001406 break;
1407 }
1408
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001409 netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
1410 (unsigned int)(queue - bp->queues),
1411 (unsigned long)status);
Havard Skinnemoena268adb2012-10-31 06:04:52 +00001412
Harini Katakame5010702019-01-29 15:20:03 +05301413 if (status & bp->rx_intr_mask) {
Moritz Fischer64ec42f2016-03-29 19:11:12 -07001414 /* There's no point taking any more interrupts
Joshua Hokeb3363692010-10-25 01:44:22 +00001415 * until we have processed the buffers. The
1416 * scheduling call may fail if the poll routine
1417 * is already scheduled, so disable interrupts
1418 * now.
1419 */
Harini Katakame5010702019-01-29 15:20:03 +05301420 queue_writel(queue, IDR, bp->rx_intr_mask);
Nicolas Ferre581df9e2013-05-14 03:00:16 +00001421 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001422 queue_writel(queue, ISR, MACB_BIT(RCOMP));
Joshua Hokeb3363692010-10-25 01:44:22 +00001423
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001424 if (napi_schedule_prep(&queue->napi)) {
Havard Skinnemoena268adb2012-10-31 06:04:52 +00001425 netdev_vdbg(bp->dev, "scheduling RX softirq\n");
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001426 __napi_schedule(&queue->napi);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001427 }
1428 }
1429
Nicolas Ferree86cd532012-10-31 06:04:57 +00001430 if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001431 queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
1432 schedule_work(&queue->tx_error_task);
Soren Brinkmann6a027b72014-05-04 15:42:59 -07001433
1434 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001435 queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);
Soren Brinkmann6a027b72014-05-04 15:42:59 -07001436
Nicolas Ferree86cd532012-10-31 06:04:57 +00001437 break;
1438 }
1439
1440 if (status & MACB_BIT(TCOMP))
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001441 macb_tx_interrupt(queue);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001442
Claudiu Beznea42983882018-12-17 10:02:42 +00001443 if (status & MACB_BIT(TXUBR))
1444 macb_tx_restart(queue);
1445
Moritz Fischer64ec42f2016-03-29 19:11:12 -07001446 /* Link change detection isn't possible with RMII, so we'll
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001447 * add that if/when we get our hands on a full-blown MII PHY.
1448 */
1449
Nathan Sullivan86b5e7d2015-05-13 17:01:36 -05001450 /* There is a hardware issue under heavy load where DMA can
1451 * stop, this causes endless "used buffer descriptor read"
1452 * interrupts but it can be cleared by re-enabling RX. See
Harini Katakame5010702019-01-29 15:20:03 +05301453 * the at91rm9200 manual, section 41.3.1 or the Zynq manual
1454 * section 16.7.4 for details. RXUBR is only enabled for
1455 * these two versions.
Nathan Sullivan86b5e7d2015-05-13 17:01:36 -05001456 */
Nathan Sullivanbfbb92c2015-05-05 15:00:25 -05001457 if (status & MACB_BIT(RXUBR)) {
1458 ctrl = macb_readl(bp, NCR);
1459 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
Zumeng Chenffac0e92016-11-28 21:55:00 +08001460 wmb();
Nathan Sullivanbfbb92c2015-05-05 15:00:25 -05001461 macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
1462
1463 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
Cyrille Pitchenba504992016-03-24 15:40:04 +01001464 queue_writel(queue, ISR, MACB_BIT(RXUBR));
Nathan Sullivanbfbb92c2015-05-05 15:00:25 -05001465 }
1466
Alexander Steinb19f7f72011-04-13 05:03:24 +00001467 if (status & MACB_BIT(ISR_ROVR)) {
1468 /* We missed at least one packet */
Jamie Ilesf75ba502011-11-08 10:12:32 +00001469 if (macb_is_gem(bp))
1470 bp->hw_stats.gem.rx_overruns++;
1471 else
1472 bp->hw_stats.macb.rx_overruns++;
Soren Brinkmann6a027b72014-05-04 15:42:59 -07001473
1474 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001475 queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
Alexander Steinb19f7f72011-04-13 05:03:24 +00001476 }
1477
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001478 if (status & MACB_BIT(HRESP)) {
Harini Katakam032dc412018-01-27 12:09:01 +05301479 tasklet_schedule(&bp->hresp_err_tasklet);
Jamie Ilesc220f8c2011-03-08 20:27:08 +00001480 netdev_err(dev, "DMA bus error: HRESP not OK\n");
Soren Brinkmann6a027b72014-05-04 15:42:59 -07001481
1482 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001483 queue_writel(queue, ISR, MACB_BIT(HRESP));
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001484 }
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001485 status = queue_readl(queue, ISR);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001486 }
1487
1488 spin_unlock(&bp->lock);
1489
1490 return IRQ_HANDLED;
1491}
1492
Thomas Petazzoni6e8cf5c2009-05-04 11:08:41 -07001493#ifdef CONFIG_NET_POLL_CONTROLLER
Moritz Fischer64ec42f2016-03-29 19:11:12 -07001494/* Polling receive - used by netconsole and other diagnostic tools
Thomas Petazzoni6e8cf5c2009-05-04 11:08:41 -07001495 * to allow network i/o with interrupts disabled.
1496 */
1497static void macb_poll_controller(struct net_device *dev)
1498{
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001499 struct macb *bp = netdev_priv(dev);
1500 struct macb_queue *queue;
Thomas Petazzoni6e8cf5c2009-05-04 11:08:41 -07001501 unsigned long flags;
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001502 unsigned int q;
Thomas Petazzoni6e8cf5c2009-05-04 11:08:41 -07001503
1504 local_irq_save(flags);
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001505 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
1506 macb_interrupt(dev->irq, queue);
Thomas Petazzoni6e8cf5c2009-05-04 11:08:41 -07001507 local_irq_restore(flags);
1508}
1509#endif
1510
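/* Map an skb (linear part plus page fragments) onto TX descriptors,
 * splitting any buffer larger than bp->max_tx_length. The chain is
 * committed in reverse: the TX_USED stopper one slot past the frame is
 * written first and the first descriptor's control word last, so the
 * controller never sees a half-built frame. For LSO the first
 * descriptor carries only the @hdrlen header bytes. Returns the number
 * of descriptors consumed, or 0 on a DMA mapping error.
 */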
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001511static unsigned int macb_tx_map(struct macb *bp,
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001512 struct macb_queue *queue,
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00001513 struct sk_buff *skb,
1514 unsigned int hdrlen)
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001515{
1516 dma_addr_t mapping;
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001517 unsigned int len, entry, i, tx_head = queue->tx_head;
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001518 struct macb_tx_skb *tx_skb = NULL;
1519 struct macb_dma_desc *desc;
1520 unsigned int offset, size, count = 0;
1521 unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00001522 unsigned int eof = 1, mss_mfs = 0;
1523 u32 ctrl, lso_ctrl = 0, seq_ctrl = 0;
1524
1525 /* LSO */
1526 if (skb_shinfo(skb)->gso_size != 0) {
1527 if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1528 /* UDP - UFO */
1529 lso_ctrl = MACB_LSO_UFO_ENABLE;
1530 else
1531 /* TCP - TSO */
1532 lso_ctrl = MACB_LSO_TSO_ENABLE;
1533 }
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001534
1535 /* First, map non-paged data */
1536 len = skb_headlen(skb);
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00001537
1538 /* first buffer length */
1539 size = hdrlen;
1540
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001541 offset = 0;
1542 while (len) {
Zach Brownb410d132016-10-19 09:56:57 -05001543 entry = macb_tx_ring_wrap(bp, tx_head);
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001544 tx_skb = &queue->tx_skb[entry];
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001545
1546 mapping = dma_map_single(&bp->pdev->dev,
1547 skb->data + offset,
1548 size, DMA_TO_DEVICE);
1549 if (dma_mapping_error(&bp->pdev->dev, mapping))
1550 goto dma_error;
1551
1552 /* Save info to properly release resources */
1553 tx_skb->skb = NULL;
1554 tx_skb->mapping = mapping;
1555 tx_skb->size = size;
1556 tx_skb->mapped_as_page = false;
1557
1558 len -= size;
1559 offset += size;
1560 count++;
1561 tx_head++;
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00001562
1563 size = min(len, bp->max_tx_length);
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001564 }
1565
1566 /* Then, map paged data from fragments */
1567 for (f = 0; f < nr_frags; f++) {
1568 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
1569
1570 len = skb_frag_size(frag);
1571 offset = 0;
1572 while (len) {
1573 size = min(len, bp->max_tx_length);
Zach Brownb410d132016-10-19 09:56:57 -05001574 entry = macb_tx_ring_wrap(bp, tx_head);
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001575 tx_skb = &queue->tx_skb[entry];
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001576
1577 mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
1578 offset, size, DMA_TO_DEVICE);
1579 if (dma_mapping_error(&bp->pdev->dev, mapping))
1580 goto dma_error;
1581
1582 /* Save info to properly release resources */
1583 tx_skb->skb = NULL;
1584 tx_skb->mapping = mapping;
1585 tx_skb->size = size;
1586 tx_skb->mapped_as_page = true;
1587
1588 len -= size;
1589 offset += size;
1590 count++;
1591 tx_head++;
1592 }
1593 }
1594
1595 /* Should never happen */
Moritz Fischeraa50b552016-03-29 19:11:13 -07001596 if (unlikely(!tx_skb)) {
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001597 netdev_err(bp->dev, "BUG! empty skb!\n");
1598 return 0;
1599 }
1600
1601 /* This is the last buffer of the frame: save socket buffer */
1602 tx_skb->skb = skb;
1603
1604 /* Update TX ring: update buffer descriptors in reverse order
1605 * to avoid race condition
1606 */
1607
1608 /* Set 'TX_USED' bit in buffer descriptor at tx_head position
1609 * to set the end of TX queue
1610 */
1611 i = tx_head;
Zach Brownb410d132016-10-19 09:56:57 -05001612 entry = macb_tx_ring_wrap(bp, i);
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001613 ctrl = MACB_BIT(TX_USED);
Rafal Ozieblodc97a892017-01-27 15:08:20 +00001614 desc = macb_tx_desc(queue, entry);
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001615 desc->ctrl = ctrl;
1616
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00001617 if (lso_ctrl) {
1618 if (lso_ctrl == MACB_LSO_UFO_ENABLE)
1619 /* include header and FCS in value given to h/w */
1620 mss_mfs = skb_shinfo(skb)->gso_size +
1621 skb_transport_offset(skb) +
1622 ETH_FCS_LEN;
1623 else /* TSO */ {
1624 mss_mfs = skb_shinfo(skb)->gso_size;
1625 /* TCP Sequence Number Source Select
1626 * can be set only for TSO
1627 */
1628 seq_ctrl = 0;
1629 }
1630 }
1631
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001632 do {
1633 i--;
Zach Brownb410d132016-10-19 09:56:57 -05001634 entry = macb_tx_ring_wrap(bp, i);
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001635 tx_skb = &queue->tx_skb[entry];
Rafal Ozieblodc97a892017-01-27 15:08:20 +00001636 desc = macb_tx_desc(queue, entry);
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001637
1638 ctrl = (u32)tx_skb->size;
1639 if (eof) {
1640 ctrl |= MACB_BIT(TX_LAST);
1641 eof = 0;
1642 }
Zach Brownb410d132016-10-19 09:56:57 -05001643 if (unlikely(entry == (bp->tx_ring_size - 1)))
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001644 ctrl |= MACB_BIT(TX_WRAP);
1645
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00001646 /* First descriptor is header descriptor */
1647 if (i == queue->tx_head) {
1648 ctrl |= MACB_BF(TX_LSO, lso_ctrl);
1649 ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
Claudiu Beznea653e92a2018-08-07 12:25:14 +03001650 if ((bp->dev->features & NETIF_F_HW_CSUM) &&
1651 skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl)
1652 ctrl |= MACB_BIT(TX_NOCRC);
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00001653 } else
1654 /* Only set MSS/MFS on payload descriptors
1655 * (second or later descriptor)
1656 */
1657 ctrl |= MACB_BF(MSS_MFS, mss_mfs);
1658
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001659 /* Set TX buffer descriptor */
Rafal Ozieblodc97a892017-01-27 15:08:20 +00001660 macb_set_addr(bp, desc, tx_skb->mapping);
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001661 /* desc->addr must be visible to hardware before clearing
1662 * 'TX_USED' bit in desc->ctrl.
1663 */
1664 wmb();
1665 desc->ctrl = ctrl;
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001666 } while (i != queue->tx_head);
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001667
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001668 queue->tx_head = tx_head;
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001669
1670 return count;
1671
1672dma_error:
1673 netdev_err(bp->dev, "TX DMA map failed\n");
1674
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001675 for (i = queue->tx_head; i != tx_head; i++) {
1676 tx_skb = macb_tx_skb(queue, i);
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001677
1678 macb_tx_unmap(bp, tx_skb);
1679 }
1680
1681 return 0;
1682}
1683
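/* ndo_features_check handler: drop the LSO features for skbs the
 * controller cannot segment, i.e. whenever any payload buffer other
 * than the last is not a multiple of MACB_TX_LEN_ALIGN (8) bytes.
 */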
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00001684static netdev_features_t macb_features_check(struct sk_buff *skb,
1685 struct net_device *dev,
1686 netdev_features_t features)
1687{
1688 unsigned int nr_frags, f;
1689 unsigned int hdrlen;
1690
1691 /* Validate LSO compatibility */
1692
1693 /* there is only one buffer */
1694 if (!skb_is_nonlinear(skb))
1695 return features;
1696
1697 /* length of header */
1698 hdrlen = skb_transport_offset(skb);
1699 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1700 hdrlen += tcp_hdrlen(skb);
1701
1702 /* For LSO:
1703	 * When software supplies two or more payload buffers, all payload buffers
1704 * apart from the last must be a multiple of 8 bytes in size.
1705 */
1706 if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN))
1707 return features & ~MACB_NETIF_LSO;
1708
1709 nr_frags = skb_shinfo(skb)->nr_frags;
1710 /* No need to check last fragment */
1711 nr_frags--;
1712 for (f = 0; f < nr_frags; f++) {
1713 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
1714
1715 if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN))
1716 return features & ~MACB_NETIF_LSO;
1717 }
1718 return features;
1719}
1720
Helmut Buchsbaum007e4ba2016-09-04 18:09:47 +02001721static inline int macb_clear_csum(struct sk_buff *skb)
1722{
1723 /* no change for packets without checksum offloading */
1724 if (skb->ip_summed != CHECKSUM_PARTIAL)
1725 return 0;
1726
1727 /* make sure we can modify the header */
1728 if (unlikely(skb_cow_head(skb, 0)))
1729 return -1;
1730
1731 /* initialize checksum field
1732 * This is required - at least for Zynq, which otherwise calculates
1733	 * wrong UDP header checksums for UDP packets with UDP data len <= 2
1734 */
1735 *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
1736 return 0;
1737}
1738
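/* With NETIF_F_HW_CSUM enabled the driver sets TX_NOCRC on frames it
 * does not checksum in hardware (see macb_tx_map()), so those frames
 * must be padded to the minimum Ethernet length and have their FCS
 * computed and appended in software here; the skb is reallocated only
 * when neither tailroom nor headroom can absorb the extra bytes.
 */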
Claudiu Beznea653e92a2018-08-07 12:25:14 +03001739static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
1740{
1741 bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb);
1742 int padlen = ETH_ZLEN - (*skb)->len;
1743 int headroom = skb_headroom(*skb);
1744 int tailroom = skb_tailroom(*skb);
1745 struct sk_buff *nskb;
1746 u32 fcs;
1747
1748 if (!(ndev->features & NETIF_F_HW_CSUM) ||
1749	    (*skb)->ip_summed == CHECKSUM_PARTIAL ||
1750 skb_shinfo(*skb)->gso_size) /* Not available for GSO */
1751 return 0;
1752
1753 if (padlen <= 0) {
1754		/* FCS could be appended to tailroom. */
1755 if (tailroom >= ETH_FCS_LEN)
1756 goto add_fcs;
1757		/* FCS could be appended by moving data to headroom. */
1758 else if (!cloned && headroom + tailroom >= ETH_FCS_LEN)
1759 padlen = 0;
1760 /* No room for FCS, need to reallocate skb. */
1761 else
Tristram Ha899ecae2018-10-24 14:51:23 -07001762 padlen = ETH_FCS_LEN;
Claudiu Beznea653e92a2018-08-07 12:25:14 +03001763 } else {
1764 /* Add room for FCS. */
1765 padlen += ETH_FCS_LEN;
1766 }
1767
1768 if (!cloned && headroom + tailroom >= padlen) {
1769 (*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len);
1770 skb_set_tail_pointer(*skb, (*skb)->len);
1771 } else {
1772 nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC);
1773 if (!nskb)
1774 return -ENOMEM;
1775
Huang Zijiangf3e5c072019-02-14 14:41:18 +08001776 dev_consume_skb_any(*skb);
Claudiu Beznea653e92a2018-08-07 12:25:14 +03001777 *skb = nskb;
1778 }
1779
Claudiu Bezneaba3e1842019-01-03 14:59:35 +00001780 if (padlen > ETH_FCS_LEN)
1781 skb_put_zero(*skb, padlen - ETH_FCS_LEN);
Claudiu Beznea653e92a2018-08-07 12:25:14 +03001782
1783add_fcs:
1784 /* set FCS to packet */
1785 fcs = crc32_le(~0, (*skb)->data, (*skb)->len);
1786 fcs = ~fcs;
1787
1788 skb_put_u8(*skb, fcs & 0xff);
1789 skb_put_u8(*skb, (fcs >> 8) & 0xff);
1790 skb_put_u8(*skb, (fcs >> 16) & 0xff);
1791 skb_put_u8(*skb, (fcs >> 24) & 0xff);
1792
1793 return 0;
1794}
1795
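/* ndo_start_xmit handler: clear/seed the checksum field, pad and append
 * the FCS when needed, and compute how many descriptors the frame
 * requires (LSO headers get a descriptor of their own) before taking
 * bp->lock to map the buffers and trigger TSTART. The subqueue is
 * stopped as soon as the ring runs out of free descriptors.
 */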
Claudiu Beznead1c38952018-08-07 12:25:12 +03001796static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001797{
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001798 u16 queue_index = skb_get_queue_mapping(skb);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001799 struct macb *bp = netdev_priv(dev);
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001800 struct macb_queue *queue = &bp->queues[queue_index];
Dongdong Deng48719532009-08-23 19:49:07 -07001801 unsigned long flags;
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00001802 unsigned int desc_cnt, nr_frags, frag_size, f;
1803 unsigned int hdrlen;
1804 bool is_lso, is_udp = 0;
Claudiu Beznead1c38952018-08-07 12:25:12 +03001805 netdev_tx_t ret = NETDEV_TX_OK;
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00001806
Claudiu Beznea33729f22018-08-07 12:25:13 +03001807 if (macb_clear_csum(skb)) {
1808 dev_kfree_skb_any(skb);
1809 return ret;
1810 }
1811
Claudiu Beznea653e92a2018-08-07 12:25:14 +03001812 if (macb_pad_and_fcs(&skb, dev)) {
1813 dev_kfree_skb_any(skb);
1814 return ret;
1815 }
1816
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00001817 is_lso = (skb_shinfo(skb)->gso_size != 0);
1818
1819 if (is_lso) {
1820 is_udp = !!(ip_hdr(skb)->protocol == IPPROTO_UDP);
1821
1822 /* length of headers */
1823 if (is_udp)
1824 /* only queue eth + ip headers separately for UDP */
1825 hdrlen = skb_transport_offset(skb);
1826 else
1827 hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
1828 if (skb_headlen(skb) < hdrlen) {
1829 netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n");
1830 /* if this is required, would need to copy to single buffer */
1831 return NETDEV_TX_BUSY;
1832 }
1833 } else
1834 hdrlen = min(skb_headlen(skb), bp->max_tx_length);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001835
Havard Skinnemoena268adb2012-10-31 06:04:52 +00001836#if defined(DEBUG) && defined(VERBOSE_DEBUG)
1837 netdev_vdbg(bp->dev,
Moritz Fischeraa50b552016-03-29 19:11:13 -07001838 "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
1839 queue_index, skb->len, skb->head, skb->data,
1840 skb_tail_pointer(skb), skb_end_pointer(skb));
Jamie Ilesc220f8c2011-03-08 20:27:08 +00001841 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
1842 skb->data, 16, true);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001843#endif
1844
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001845 /* Count how many TX buffer descriptors are needed to send this
1846 * socket buffer: skb fragments of jumbo frames may need to be
Moritz Fischeraa50b552016-03-29 19:11:13 -07001847 * split into many buffer descriptors.
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001848 */
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00001849 if (is_lso && (skb_headlen(skb) > hdrlen))
1850 /* extra header descriptor if also payload in first buffer */
1851 desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1;
1852 else
1853 desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001854 nr_frags = skb_shinfo(skb)->nr_frags;
1855 for (f = 0; f < nr_frags; f++) {
1856 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00001857 desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001858 }
1859
Dongdong Deng48719532009-08-23 19:49:07 -07001860 spin_lock_irqsave(&bp->lock, flags);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001861
1862 /* This is a hard error, log it. */
Zach Brownb410d132016-10-19 09:56:57 -05001863 if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00001864 bp->tx_ring_size) < desc_cnt) {
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001865 netif_stop_subqueue(dev, queue_index);
Dongdong Deng48719532009-08-23 19:49:07 -07001866 spin_unlock_irqrestore(&bp->lock, flags);
Jamie Ilesc220f8c2011-03-08 20:27:08 +00001867 netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001868 queue->tx_head, queue->tx_tail);
Patrick McHardy5b548142009-06-12 06:22:29 +00001869 return NETDEV_TX_BUSY;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001870 }
1871
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001872 /* Map socket buffer for DMA transfer */
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00001873 if (!macb_tx_map(bp, queue, skb, hdrlen)) {
Eric W. Biedermanc88b5b62014-03-15 16:08:27 -07001874 dev_kfree_skb_any(skb);
Soren Brinkmann92030902014-03-04 08:46:39 -08001875 goto unlock;
1876 }
Havard Skinnemoen55054a12012-10-31 06:04:55 +00001877
Havard Skinnemoen03dbe052012-10-31 06:04:51 +00001878 /* Make newly initialized descriptor visible to hardware */
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001879 wmb();
Richard Cochrane0720922011-06-19 21:51:28 +00001880 skb_tx_timestamp(skb);
1881
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001882 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
1883
Zach Brownb410d132016-10-19 09:56:57 -05001884 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001885 netif_stop_subqueue(dev, queue_index);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001886
Soren Brinkmann92030902014-03-04 08:46:39 -08001887unlock:
Dongdong Deng48719532009-08-23 19:49:07 -07001888 spin_unlock_irqrestore(&bp->lock, flags);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001889
Claudiu Beznead1c38952018-08-07 12:25:12 +03001890 return ret;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001891}
1892
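/* Pick the RX buffer size for the current MTU: plain MACB always uses
 * fixed MACB_RX_BUFFER_SIZE buffers, while GEM buffers track the frame
 * size rounded up to the hardware granularity of RX_BUFFER_MULTIPLE.
 */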
Nicolas Ferre4df95132013-06-04 21:57:12 +00001893static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
Nicolas Ferre1b447912013-06-04 21:57:11 +00001894{
1895 if (!macb_is_gem(bp)) {
1896 bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
1897 } else {
Nicolas Ferre4df95132013-06-04 21:57:12 +00001898 bp->rx_buffer_size = size;
Nicolas Ferre1b447912013-06-04 21:57:11 +00001899
Nicolas Ferre1b447912013-06-04 21:57:11 +00001900 if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
Nicolas Ferre4df95132013-06-04 21:57:12 +00001901 netdev_dbg(bp->dev,
Moritz Fischeraa50b552016-03-29 19:11:13 -07001902 "RX buffer must be multiple of %d bytes, expanding\n",
1903 RX_BUFFER_MULTIPLE);
Nicolas Ferre1b447912013-06-04 21:57:11 +00001904 bp->rx_buffer_size =
Nicolas Ferre4df95132013-06-04 21:57:12 +00001905 roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
Nicolas Ferre1b447912013-06-04 21:57:11 +00001906 }
Nicolas Ferre1b447912013-06-04 21:57:11 +00001907 }
Nicolas Ferre4df95132013-06-04 21:57:12 +00001908
Alexey Dobriyan5b5e0922017-02-27 14:30:02 -08001909 netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n",
Nicolas Ferre4df95132013-06-04 21:57:12 +00001910 bp->dev->mtu, bp->rx_buffer_size);
Nicolas Ferre1b447912013-06-04 21:57:11 +00001911}
1912
Nicolas Ferre4df95132013-06-04 21:57:12 +00001913static void gem_free_rx_buffers(struct macb *bp)
1914{
1915 struct sk_buff *skb;
1916 struct macb_dma_desc *desc;
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001917 struct macb_queue *queue;
Nicolas Ferre4df95132013-06-04 21:57:12 +00001918 dma_addr_t addr;
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001919 unsigned int q;
Nicolas Ferre4df95132013-06-04 21:57:12 +00001920 int i;
1921
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001922 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1923 if (!queue->rx_skbuff)
Nicolas Ferre4df95132013-06-04 21:57:12 +00001924 continue;
1925
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001926 for (i = 0; i < bp->rx_ring_size; i++) {
1927 skb = queue->rx_skbuff[i];
Rafal Ozieblodc97a892017-01-27 15:08:20 +00001928
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001929 if (!skb)
1930 continue;
1931
1932 desc = macb_rx_desc(queue, i);
1933 addr = macb_get_addr(bp, desc);
1934
1935 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
1936 DMA_FROM_DEVICE);
1937 dev_kfree_skb_any(skb);
1938 skb = NULL;
1939 }
1940
1941 kfree(queue->rx_skbuff);
1942 queue->rx_skbuff = NULL;
Nicolas Ferre4df95132013-06-04 21:57:12 +00001943 }
Nicolas Ferre4df95132013-06-04 21:57:12 +00001944}
1945
1946static void macb_free_rx_buffers(struct macb *bp)
1947{
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001948 struct macb_queue *queue = &bp->queues[0];
1949
1950 if (queue->rx_buffers) {
Nicolas Ferre4df95132013-06-04 21:57:12 +00001951 dma_free_coherent(&bp->pdev->dev,
Zach Brownb410d132016-10-19 09:56:57 -05001952 bp->rx_ring_size * bp->rx_buffer_size,
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001953 queue->rx_buffers, queue->rx_buffers_dma);
1954 queue->rx_buffers = NULL;
Nicolas Ferre4df95132013-06-04 21:57:12 +00001955 }
1956}
Nicolas Ferre1b447912013-06-04 21:57:11 +00001957
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001958static void macb_free_consistent(struct macb *bp)
1959{
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001960 struct macb_queue *queue;
1961 unsigned int q;
Harini Katakam404cd082018-07-06 12:18:58 +05301962 int size;
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001963
Nicolas Ferre4df95132013-06-04 21:57:12 +00001964 bp->macbgem_ops.mog_free_rx_buffers(bp);
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001965
1966 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1967 kfree(queue->tx_skb);
1968 queue->tx_skb = NULL;
1969 if (queue->tx_ring) {
Harini Katakam404cd082018-07-06 12:18:58 +05301970 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
1971 dma_free_coherent(&bp->pdev->dev, size,
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001972 queue->tx_ring, queue->tx_ring_dma);
1973 queue->tx_ring = NULL;
1974 }
Harini Katakame50b7702018-07-06 12:18:57 +05301975 if (queue->rx_ring) {
Harini Katakam404cd082018-07-06 12:18:58 +05301976 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
1977 dma_free_coherent(&bp->pdev->dev, size,
Harini Katakame50b7702018-07-06 12:18:57 +05301978 queue->rx_ring, queue->rx_ring_dma);
1979 queue->rx_ring = NULL;
1980 }
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001981 }
Nicolas Ferre4df95132013-06-04 21:57:12 +00001982}
1983
1984static int gem_alloc_rx_buffers(struct macb *bp)
1985{
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001986 struct macb_queue *queue;
1987 unsigned int q;
Nicolas Ferre4df95132013-06-04 21:57:12 +00001988 int size;
1989
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001990 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1991 size = bp->rx_ring_size * sizeof(struct sk_buff *);
1992 queue->rx_skbuff = kzalloc(size, GFP_KERNEL);
1993 if (!queue->rx_skbuff)
1994 return -ENOMEM;
1995 else
1996 netdev_dbg(bp->dev,
1997 "Allocated %d RX struct sk_buff entries at %p\n",
1998 bp->rx_ring_size, queue->rx_skbuff);
1999 }
Nicolas Ferre4df95132013-06-04 21:57:12 +00002000 return 0;
2001}
2002
2003static int macb_alloc_rx_buffers(struct macb *bp)
2004{
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00002005 struct macb_queue *queue = &bp->queues[0];
Nicolas Ferre4df95132013-06-04 21:57:12 +00002006 int size;
2007
Zach Brownb410d132016-10-19 09:56:57 -05002008 size = bp->rx_ring_size * bp->rx_buffer_size;
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00002009 queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
2010 &queue->rx_buffers_dma, GFP_KERNEL);
2011 if (!queue->rx_buffers)
Nicolas Ferre4df95132013-06-04 21:57:12 +00002012 return -ENOMEM;
Moritz Fischer64ec42f2016-03-29 19:11:12 -07002013
2014 netdev_dbg(bp->dev,
2015 "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00002016 size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers);
Nicolas Ferre4df95132013-06-04 21:57:12 +00002017 return 0;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002018}
2019
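/* Allocate the DMA-coherent TX/RX descriptor rings for every queue
 * (including the extra space some controllers prefetch past the ring
 * end), the tx_skb bookkeeping arrays, and finally the RX data buffers
 * via the MACB/GEM-specific mog_alloc_rx_buffers() hook.
 */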
2020static int macb_alloc_consistent(struct macb *bp)
2021{
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002022 struct macb_queue *queue;
2023 unsigned int q;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002024 int size;
2025
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002026 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
Harini Katakam404cd082018-07-06 12:18:58 +05302027 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002028 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
2029 &queue->tx_ring_dma,
2030 GFP_KERNEL);
2031 if (!queue->tx_ring)
2032 goto out_err;
2033 netdev_dbg(bp->dev,
2034 "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
2035 q, size, (unsigned long)queue->tx_ring_dma,
2036 queue->tx_ring);
2037
Zach Brownb410d132016-10-19 09:56:57 -05002038 size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002039 queue->tx_skb = kmalloc(size, GFP_KERNEL);
2040 if (!queue->tx_skb)
2041 goto out_err;
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00002042
Harini Katakam404cd082018-07-06 12:18:58 +05302043 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00002044 queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
2045 &queue->rx_ring_dma, GFP_KERNEL);
2046 if (!queue->rx_ring)
2047 goto out_err;
2048 netdev_dbg(bp->dev,
2049 "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
2050 size, (unsigned long)queue->rx_ring_dma, queue->rx_ring);
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002051 }
Nicolas Ferre4df95132013-06-04 21:57:12 +00002052 if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002053 goto out_err;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002054
2055 return 0;
2056
2057out_err:
2058 macb_free_consistent(bp);
2059 return -ENOMEM;
2060}
2061
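/* GEM ring setup: mark every TX descriptor as used (software-owned),
 * set TX_WRAP on the last one, reset the head/tail indices and let
 * gem_rx_refill() populate the RX ring with freshly mapped skbs.
 */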
Nicolas Ferre4df95132013-06-04 21:57:12 +00002062static void gem_init_rings(struct macb *bp)
2063{
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002064 struct macb_queue *queue;
Rafal Ozieblodc97a892017-01-27 15:08:20 +00002065 struct macb_dma_desc *desc = NULL;
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002066 unsigned int q;
Nicolas Ferre4df95132013-06-04 21:57:12 +00002067 int i;
2068
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002069 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
Zach Brownb410d132016-10-19 09:56:57 -05002070 for (i = 0; i < bp->tx_ring_size; i++) {
Rafal Ozieblodc97a892017-01-27 15:08:20 +00002071 desc = macb_tx_desc(queue, i);
2072 macb_set_addr(bp, desc, 0);
2073 desc->ctrl = MACB_BIT(TX_USED);
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002074 }
Rafal Ozieblodc97a892017-01-27 15:08:20 +00002075 desc->ctrl |= MACB_BIT(TX_WRAP);
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002076 queue->tx_head = 0;
2077 queue->tx_tail = 0;
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00002078
2079 queue->rx_tail = 0;
2080 queue->rx_prepared_head = 0;
2081
2082 gem_rx_refill(queue);
Nicolas Ferre4df95132013-06-04 21:57:12 +00002083 }
Nicolas Ferre4df95132013-06-04 21:57:12 +00002085}
2086
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002087static void macb_init_rings(struct macb *bp)
2088{
2089 int i;
Rafal Ozieblodc97a892017-01-27 15:08:20 +00002090 struct macb_dma_desc *desc = NULL;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002091
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00002092 macb_init_rx_ring(&bp->queues[0]);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002093
Zach Brownb410d132016-10-19 09:56:57 -05002094 for (i = 0; i < bp->tx_ring_size; i++) {
Rafal Ozieblodc97a892017-01-27 15:08:20 +00002095 desc = macb_tx_desc(&bp->queues[0], i);
2096 macb_set_addr(bp, desc, 0);
2097 desc->ctrl = MACB_BIT(TX_USED);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002098 }
Ben Shelton21d35152015-04-22 17:28:54 -05002099 bp->queues[0].tx_head = 0;
2100 bp->queues[0].tx_tail = 0;
Rafal Ozieblodc97a892017-01-27 15:08:20 +00002101 desc->ctrl |= MACB_BIT(TX_WRAP);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002102}
2103
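/* Quiesce the controller: disable the receiver and transmitter, clear
 * the statistics registers, acknowledge all status bits and mask every
 * interrupt on all queues.
 */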
2104static void macb_reset_hw(struct macb *bp)
2105{
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002106 struct macb_queue *queue;
2107 unsigned int q;
Anssi Hannula0da70f82018-08-23 10:45:22 +03002108 u32 ctrl = macb_readl(bp, NCR);
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002109
Moritz Fischer64ec42f2016-03-29 19:11:12 -07002110 /* Disable RX and TX (XXX: Should we halt the transmission
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002111 * more gracefully?)
2112 */
Anssi Hannula0da70f82018-08-23 10:45:22 +03002113 ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002114
2115 /* Clear the stats registers (XXX: Update stats first?) */
Anssi Hannula0da70f82018-08-23 10:45:22 +03002116 ctrl |= MACB_BIT(CLRSTAT);
2117
2118 macb_writel(bp, NCR, ctrl);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002119
2120 /* Clear all status flags */
Joachim Eastwood95ebcea2012-10-22 08:45:31 +00002121 macb_writel(bp, TSR, -1);
2122 macb_writel(bp, RSR, -1);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002123
2124 /* Disable all interrupts */
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002125 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2126 queue_writel(queue, IDR, -1);
2127 queue_readl(queue, ISR);
Nathan Sullivan24468372016-01-14 13:27:27 -06002128 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
2129 queue_writel(queue, ISR, -1);
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002130 }
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002131}
2132
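/* Derive the MDC clock divider from the pclk rate so that MDC stays at
 * or below the 2.5 MHz MDIO limit. For example, a 125 MHz pclk falls
 * into the 120-160 MHz bracket, selecting GEM_CLK_DIV64 and giving an
 * MDC of roughly 125 MHz / 64 = 1.95 MHz.
 */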
Jamie Iles70c9f3d2011-03-09 16:22:54 +00002133static u32 gem_mdc_clk_div(struct macb *bp)
2134{
2135 u32 config;
2136 unsigned long pclk_hz = clk_get_rate(bp->pclk);
2137
2138 if (pclk_hz <= 20000000)
2139 config = GEM_BF(CLK, GEM_CLK_DIV8);
2140 else if (pclk_hz <= 40000000)
2141 config = GEM_BF(CLK, GEM_CLK_DIV16);
2142 else if (pclk_hz <= 80000000)
2143 config = GEM_BF(CLK, GEM_CLK_DIV32);
2144 else if (pclk_hz <= 120000000)
2145 config = GEM_BF(CLK, GEM_CLK_DIV48);
2146 else if (pclk_hz <= 160000000)
2147 config = GEM_BF(CLK, GEM_CLK_DIV64);
2148 else
2149 config = GEM_BF(CLK, GEM_CLK_DIV96);
2150
2151 return config;
2152}
2153
2154static u32 macb_mdc_clk_div(struct macb *bp)
2155{
2156 u32 config;
2157 unsigned long pclk_hz;
2158
2159 if (macb_is_gem(bp))
2160 return gem_mdc_clk_div(bp);
2161
2162 pclk_hz = clk_get_rate(bp->pclk);
2163 if (pclk_hz <= 20000000)
2164 config = MACB_BF(CLK, MACB_CLK_DIV8);
2165 else if (pclk_hz <= 40000000)
2166 config = MACB_BF(CLK, MACB_CLK_DIV16);
2167 else if (pclk_hz <= 80000000)
2168 config = MACB_BF(CLK, MACB_CLK_DIV32);
2169 else
2170 config = MACB_BF(CLK, MACB_CLK_DIV64);
2171
2172 return config;
2173}
2174
Moritz Fischer64ec42f2016-03-29 19:11:12 -07002175/* Get the DMA bus width field of the network configuration register that we
Jamie Iles757a03c2011-03-09 16:29:59 +00002176 * should program. We find the width from decoding the design configuration
2177 * register to find the maximum supported data bus width.
2178 */
2179static u32 macb_dbw(struct macb *bp)
2180{
2181 if (!macb_is_gem(bp))
2182 return 0;
2183
2184 switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
2185 case 4:
2186 return GEM_BF(DBW, GEM_DBW128);
2187 case 2:
2188 return GEM_BF(DBW, GEM_DBW64);
2189 case 1:
2190 default:
2191 return GEM_BF(DBW, GEM_DBW32);
2192 }
2193}
2194
Moritz Fischer64ec42f2016-03-29 19:11:12 -07002195/* Configure the receive DMA engine
Nicolas Ferreb3e3bd712012-11-23 03:49:01 +00002196 * - use the correct receive buffer size
Nicolas Ferree1755872014-07-24 13:50:58 +02002197 * - set best burst length for DMA operations
Nicolas Ferreb3e3bd712012-11-23 03:49:01 +00002198 * (if not supported by FIFO, it will fall back to the default)
2199 * - set both rx/tx packet buffers to full memory size
2200 * These are configurable parameters for GEM.
Jamie Iles0116da42011-03-14 17:38:30 +00002201 */
2202static void macb_configure_dma(struct macb *bp)
2203{
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00002204 struct macb_queue *queue;
2205 u32 buffer_size;
2206 unsigned int q;
Jamie Iles0116da42011-03-14 17:38:30 +00002207 u32 dmacfg;
2208
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00002209 buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE;
Jamie Iles0116da42011-03-14 17:38:30 +00002210 if (macb_is_gem(bp)) {
2211 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00002212 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2213 if (q)
2214 queue_writel(queue, RBQS, buffer_size);
2215 else
2216 dmacfg |= GEM_BF(RXBS, buffer_size);
2217 }
Nicolas Ferree1755872014-07-24 13:50:58 +02002218 if (bp->dma_burst_length)
2219 dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
Nicolas Ferreb3e3bd712012-11-23 03:49:01 +00002220 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
Arun Chandrana50dad32015-02-18 16:59:35 +05302221 dmacfg &= ~GEM_BIT(ENDIA_PKT);
Arun Chandran62f69242015-03-01 11:38:02 +05302222
Andy Shevchenkof2ce8a9e2015-07-24 21:23:59 +03002223 if (bp->native_io)
Arun Chandran62f69242015-03-01 11:38:02 +05302224 dmacfg &= ~GEM_BIT(ENDIA_DESC);
2225 else
2226 dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */
2227
Cyrille Pitchen85ff3d82014-07-24 13:51:00 +02002228 if (bp->dev->features & NETIF_F_HW_CSUM)
2229 dmacfg |= GEM_BIT(TXCOEN);
2230 else
2231 dmacfg &= ~GEM_BIT(TXCOEN);
Harini Katakamfff80192016-08-09 13:15:53 +05302232
Michal Simekbd620722018-09-25 08:32:50 +02002233 dmacfg &= ~GEM_BIT(ADDR64);
Harini Katakamfff80192016-08-09 13:15:53 +05302234#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
Rafal Ozieblo7b429612017-06-29 07:12:51 +01002235 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
Rafal Ozieblodc97a892017-01-27 15:08:20 +00002236 dmacfg |= GEM_BIT(ADDR64);
Harini Katakamfff80192016-08-09 13:15:53 +05302237#endif
Rafal Ozieblo7b429612017-06-29 07:12:51 +01002238#ifdef CONFIG_MACB_USE_HWSTAMP
2239 if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
2240 dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT);
2241#endif
Nicolas Ferree1755872014-07-24 13:50:58 +02002242 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
2243 dmacfg);
Jamie Iles0116da42011-03-14 17:38:30 +00002244 gem_writel(bp, DMACFG, dmacfg);
2245 }
2246}
2247
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002248static void macb_init_hw(struct macb *bp)
2249{
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002250 struct macb_queue *queue;
2251 unsigned int q;
2252
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002253 u32 config;
2254
2255 macb_reset_hw(bp);
Joachim Eastwood314bccc2012-11-07 08:14:52 +00002256 macb_set_hwaddr(bp);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002257
Jamie Iles70c9f3d2011-03-09 16:22:54 +00002258 config = macb_mdc_clk_div(bp);
Punnaiah Choudary Kalluri022be252015-11-18 09:03:50 +05302259 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
2260 config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
Havard Skinnemoen29bc2e12012-10-31 06:04:58 +00002261 config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002262	config |= MACB_BIT(PAE);		/* Pause Enable */
2263 config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
Dan Carpentera104a6b2015-05-12 21:15:24 +03002264 if (bp->caps & MACB_CAPS_JUMBO)
Harini Katakam98b5a0f42015-05-06 22:27:17 +05302265 config |= MACB_BIT(JFRAME); /* Enable jumbo frames */
2266 else
2267 config |= MACB_BIT(BIG); /* Receive oversized frames */
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002268 if (bp->dev->flags & IFF_PROMISC)
2269 config |= MACB_BIT(CAF); /* Copy All Frames */
Cyrille Pitchen924ec532014-07-24 13:51:01 +02002270 else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
2271 config |= GEM_BIT(RXCOEN);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002272 if (!(bp->dev->flags & IFF_BROADCAST))
2273 config |= MACB_BIT(NBC); /* No BroadCast */
Jamie Iles757a03c2011-03-09 16:29:59 +00002274 config |= macb_dbw(bp);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002275 macb_writel(bp, NCFGR, config);
Dan Carpentera104a6b2015-05-12 21:15:24 +03002276 if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
Harini Katakam98b5a0f42015-05-06 22:27:17 +05302277 gem_writel(bp, JML, bp->jumbo_max_len);
Vitalii Demianets26cdfb42012-11-02 07:09:24 +00002278 bp->speed = SPEED_10;
2279 bp->duplex = DUPLEX_HALF;
Harini Katakam98b5a0f42015-05-06 22:27:17 +05302280 bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
Dan Carpentera104a6b2015-05-12 21:15:24 +03002281 if (bp->caps & MACB_CAPS_JUMBO)
Harini Katakam98b5a0f42015-05-06 22:27:17 +05302282 bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002283
Jamie Iles0116da42011-03-14 17:38:30 +00002284 macb_configure_dma(bp);
2285
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002286 /* Initialize TX and RX buffers */
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002287 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00002288 queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
2289#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2290 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
2291 queue_writel(queue, RBQPH, upper_32_bits(queue->rx_ring_dma));
2292#endif
Rafal Ozieblodc97a892017-01-27 15:08:20 +00002293 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
Harini Katakamfff80192016-08-09 13:15:53 +05302294#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
Rafal Ozieblo7b429612017-06-29 07:12:51 +01002295 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
Rafal Ozieblodc97a892017-01-27 15:08:20 +00002296 queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
Harini Katakamfff80192016-08-09 13:15:53 +05302297#endif
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002298
2299 /* Enable interrupts */
2300 queue_writel(queue, IER,
Harini Katakame5010702019-01-29 15:20:03 +05302301 bp->rx_intr_mask |
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002302 MACB_TX_INT_FLAGS |
2303 MACB_BIT(HRESP));
2304 }
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002305
2306 /* Enable TX and RX */
Anssi Hannula0da70f82018-08-23 10:45:22 +03002307 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002308}
2309
Moritz Fischer64ec42f2016-03-29 19:11:12 -07002310/* The hash address register is 64 bits long and takes up two
Patrice Vilchez446ebd02007-07-12 19:07:25 +02002311 * locations in the memory map. The least significant bits are stored
2312 * in EMAC_HSL and the most significant bits in EMAC_HSH.
2313 *
2314 * The unicast hash enable and the multicast hash enable bits in the
2315 * network configuration register enable the reception of hash matched
2316 * frames. The destination address is reduced to a 6 bit index into
2317 * the 64 bit hash register using the following hash function. The
2318 * hash function is an exclusive or of every sixth bit of the
2319 * destination address.
2320 *
2321 * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
2322 * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
2323 * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
2324 * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
2325 * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
2326 * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
2327 *
2328 * da[0] represents the least significant bit of the first byte
2329 * received, that is, the multicast/unicast indicator, and da[47]
2330 * represents the most significant bit of the last byte received. If
2331 * the hash index, hi[n], points to a bit that is set in the hash
2332 * register then the frame will be matched according to whether the
2333 * frame is multicast or unicast. A multicast match will be signalled
2334 * if the multicast hash enable bit is set, da[0] is 1 and the hash
2335 * index points to a bit set in the hash register. A unicast match
2336 * will be signalled if the unicast hash enable bit is set, da[0] is 0
2337 * and the hash index points to a bit set in the hash register. To
2338 * receive all multicast frames, the hash register should be set with
2339 * all ones and the multicast hash enable bit should be set in the
2340 * network configuration register.
2341 */
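/* Worked example: for ff:ff:ff:ff:ff:ff every da[] bit is 1, so each of
 * hi[5]..hi[0] XORs eight ones down to 0 and the address hashes to
 * index 0, i.e. bit 0 of HRB.
 */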
2342
2343static inline int hash_bit_value(int bitnr, __u8 *addr)
2344{
2345 if (addr[bitnr / 8] & (1 << (bitnr % 8)))
2346 return 1;
2347 return 0;
2348}
2349
Moritz Fischer64ec42f2016-03-29 19:11:12 -07002350/* Return the hash index value for the specified address. */
Patrice Vilchez446ebd02007-07-12 19:07:25 +02002351static int hash_get_index(__u8 *addr)
2352{
2353 int i, j, bitval;
2354 int hash_index = 0;
2355
2356 for (j = 0; j < 6; j++) {
2357 for (i = 0, bitval = 0; i < 8; i++)
Xander Huff2fa45e22015-01-15 15:55:19 -06002358 bitval ^= hash_bit_value(i * 6 + j, addr);
Patrice Vilchez446ebd02007-07-12 19:07:25 +02002359
2360 hash_index |= (bitval << j);
2361 }
2362
2363 return hash_index;
2364}
2365
Moritz Fischer64ec42f2016-03-29 19:11:12 -07002366/* Add multicast addresses to the internal multicast-hash table. */
Patrice Vilchez446ebd02007-07-12 19:07:25 +02002367static void macb_sethashtable(struct net_device *dev)
2368{
Jiri Pirko22bedad32010-04-01 21:22:57 +00002369 struct netdev_hw_addr *ha;
Patrice Vilchez446ebd02007-07-12 19:07:25 +02002370 unsigned long mc_filter[2];
Jiri Pirkof9dcbcc2010-02-23 09:19:49 +00002371 unsigned int bitnr;
Patrice Vilchez446ebd02007-07-12 19:07:25 +02002372 struct macb *bp = netdev_priv(dev);
2373
Moritz Fischeraa50b552016-03-29 19:11:13 -07002374 mc_filter[0] = 0;
2375 mc_filter[1] = 0;
Patrice Vilchez446ebd02007-07-12 19:07:25 +02002376
Jiri Pirko22bedad32010-04-01 21:22:57 +00002377 netdev_for_each_mc_addr(ha, dev) {
2378 bitnr = hash_get_index(ha->addr);
Patrice Vilchez446ebd02007-07-12 19:07:25 +02002379 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
2380 }
2381
Jamie Ilesf75ba502011-11-08 10:12:32 +00002382 macb_or_gem_writel(bp, HRB, mc_filter[0]);
2383 macb_or_gem_writel(bp, HRT, mc_filter[1]);
Patrice Vilchez446ebd02007-07-12 19:07:25 +02002384}
2385
Moritz Fischer64ec42f2016-03-29 19:11:12 -07002386/* Enable/Disable promiscuous and multicast modes. */
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01002387static void macb_set_rx_mode(struct net_device *dev)
Patrice Vilchez446ebd02007-07-12 19:07:25 +02002388{
2389 unsigned long cfg;
2390 struct macb *bp = netdev_priv(dev);
2391
2392 cfg = macb_readl(bp, NCFGR);
2393
Cyrille Pitchen924ec532014-07-24 13:51:01 +02002394 if (dev->flags & IFF_PROMISC) {
Patrice Vilchez446ebd02007-07-12 19:07:25 +02002395 /* Enable promiscuous mode */
2396 cfg |= MACB_BIT(CAF);
Cyrille Pitchen924ec532014-07-24 13:51:01 +02002397
2398 /* Disable RX checksum offload */
2399 if (macb_is_gem(bp))
2400 cfg &= ~GEM_BIT(RXCOEN);
2401 } else {
2402 /* Disable promiscuous mode */
Patrice Vilchez446ebd02007-07-12 19:07:25 +02002403 cfg &= ~MACB_BIT(CAF);
2404
Cyrille Pitchen924ec532014-07-24 13:51:01 +02002405 /* Enable RX checksum offload only if requested */
2406 if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
2407 cfg |= GEM_BIT(RXCOEN);
2408 }
2409
Patrice Vilchez446ebd02007-07-12 19:07:25 +02002410 if (dev->flags & IFF_ALLMULTI) {
2411 /* Enable all multicast mode */
Jamie Ilesf75ba502011-11-08 10:12:32 +00002412 macb_or_gem_writel(bp, HRB, -1);
2413 macb_or_gem_writel(bp, HRT, -1);
Patrice Vilchez446ebd02007-07-12 19:07:25 +02002414 cfg |= MACB_BIT(NCFGR_MTI);
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00002415 } else if (!netdev_mc_empty(dev)) {
Patrice Vilchez446ebd02007-07-12 19:07:25 +02002416 /* Enable specific multicasts */
2417 macb_sethashtable(dev);
2418 cfg |= MACB_BIT(NCFGR_MTI);
2419 } else if (dev->flags & (~IFF_ALLMULTI)) {
2420 /* Disable all multicast mode */
Jamie Ilesf75ba502011-11-08 10:12:32 +00002421 macb_or_gem_writel(bp, HRB, 0);
2422 macb_or_gem_writel(bp, HRT, 0);
Patrice Vilchez446ebd02007-07-12 19:07:25 +02002423 cfg &= ~MACB_BIT(NCFGR_MTI);
2424 }
2425
2426 macb_writel(bp, NCFGR, cfg);
2427}
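
/* Summary of the mode selection above (descriptive only):
 *
 *   IFF_PROMISC        -> CAF set, RX checksum offload disabled
 *   IFF_ALLMULTI       -> HRB/HRT all ones, NCFGR_MTI set
 *   non-empty mc list  -> hash filter programmed, NCFGR_MTI set
 *   otherwise          -> HRB/HRT cleared, NCFGR_MTI cleared
 */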
2428
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002429static int macb_open(struct net_device *dev)
2430{
2431 struct macb *bp = netdev_priv(dev);
Nicolas Ferre4df95132013-06-04 21:57:12 +00002432 size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00002433 struct macb_queue *queue;
2434 unsigned int q;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002435 int err;
2436
Jamie Ilesc220f8c2011-03-08 20:27:08 +00002437 netdev_dbg(bp->dev, "open\n");
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002438
Harini Katakamd54f89a2019-03-01 16:20:34 +05302439 err = pm_runtime_get_sync(&bp->pdev->dev);
2440 if (err < 0)
2441 goto pm_exit;
2442
Nicolas Ferre03fc4722012-07-03 23:14:13 +00002443 /* carrier starts down */
2444 netif_carrier_off(dev);
2445
frederic RODO6c36a702007-07-12 19:07:24 +02002446	/* if the phy is not yet registered, retry later */
Harini Katakamd54f89a2019-03-01 16:20:34 +05302447 if (!dev->phydev) {
2448 err = -EAGAIN;
2449 goto pm_exit;
2450 }
frederic RODO6c36a702007-07-12 19:07:24 +02002451
Nicolas Ferre1b447912013-06-04 21:57:11 +00002452 /* RX buffers initialization */
Nicolas Ferre4df95132013-06-04 21:57:12 +00002453 macb_init_rx_buffer_size(bp, bufsz);
Nicolas Ferre1b447912013-06-04 21:57:11 +00002454
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002455 err = macb_alloc_consistent(bp);
2456 if (err) {
Jamie Ilesc220f8c2011-03-08 20:27:08 +00002457 netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
2458 err);
Harini Katakamd54f89a2019-03-01 16:20:34 +05302459 goto pm_exit;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002460 }
2461
Nicolas Ferre4df95132013-06-04 21:57:12 +00002462 bp->macbgem_ops.mog_init_rings(bp);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002463 macb_init_hw(bp);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002464
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00002465 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
2466 napi_enable(&queue->napi);
2467
frederic RODO6c36a702007-07-12 19:07:24 +02002468 /* schedule a link state check */
Philippe Reynes0a912812016-06-22 00:32:35 +02002469 phy_start(dev->phydev);
frederic RODO6c36a702007-07-12 19:07:24 +02002470
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002471 netif_tx_start_all_queues(dev);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002472
Andrei.Pistirica@microchip.comc2594d82017-01-19 17:56:15 +02002473 if (bp->ptp_info)
2474 bp->ptp_info->ptp_init(dev);
2475
Harini Katakamd54f89a2019-03-01 16:20:34 +05302476pm_exit:
2477 if (err) {
2478 pm_runtime_put_sync(&bp->pdev->dev);
2479 return err;
2480 }
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002481 return 0;
2482}
2483
2484static int macb_close(struct net_device *dev)
2485{
2486 struct macb *bp = netdev_priv(dev);
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00002487 struct macb_queue *queue;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002488 unsigned long flags;
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00002489 unsigned int q;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002490
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002491 netif_tx_stop_all_queues(dev);
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00002492
2493 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
2494 napi_disable(&queue->napi);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002495
Philippe Reynes0a912812016-06-22 00:32:35 +02002496 if (dev->phydev)
2497 phy_stop(dev->phydev);
frederic RODO6c36a702007-07-12 19:07:24 +02002498
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002499 spin_lock_irqsave(&bp->lock, flags);
2500 macb_reset_hw(bp);
2501 netif_carrier_off(dev);
2502 spin_unlock_irqrestore(&bp->lock, flags);
2503
2504 macb_free_consistent(bp);
2505
Andrei.Pistirica@microchip.comc2594d82017-01-19 17:56:15 +02002506 if (bp->ptp_info)
2507 bp->ptp_info->ptp_remove(dev);
2508
Harini Katakamd54f89a2019-03-01 16:20:34 +05302509 pm_runtime_put(&bp->pdev->dev);
2510
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002511 return 0;
2512}
2513
Harini Katakama5898ea2015-05-06 22:27:18 +05302514static int macb_change_mtu(struct net_device *dev, int new_mtu)
2515{
Harini Katakama5898ea2015-05-06 22:27:18 +05302516 if (netif_running(dev))
2517 return -EBUSY;
2518
Harini Katakama5898ea2015-05-06 22:27:18 +05302519 dev->mtu = new_mtu;
2520
2521 return 0;
2522}
2523
Jamie Ilesa494ed82011-03-09 16:26:35 +00002524static void gem_update_stats(struct macb *bp)
2525{
Rafal Ozieblo512286b2017-11-30 18:19:56 +00002526 struct macb_queue *queue;
2527 unsigned int i, q, idx;
2528 unsigned long *stat;
2529
Jamie Ilesa494ed82011-03-09 16:26:35 +00002530 u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
Jamie Ilesa494ed82011-03-09 16:26:35 +00002531
Xander Huff3ff13f12015-01-13 16:15:51 -06002532 for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
2533 u32 offset = gem_statistics[i].offset;
David S. Miller7a6e0702015-07-27 14:24:48 -07002534 u64 val = bp->macb_reg_readl(bp, offset);
Xander Huff3ff13f12015-01-13 16:15:51 -06002535
2536 bp->ethtool_stats[i] += val;
2537 *p += val;
2538
2539 if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
2540 /* Add GEM_OCTTXH, GEM_OCTRXH */
David S. Miller7a6e0702015-07-27 14:24:48 -07002541 val = bp->macb_reg_readl(bp, offset + 4);
Xander Huff2fa45e22015-01-15 15:55:19 -06002542 bp->ethtool_stats[i] += ((u64)val) << 32;
Xander Huff3ff13f12015-01-13 16:15:51 -06002543 *(++p) += val;
2544 }
2545 }
Rafal Ozieblo512286b2017-11-30 18:19:56 +00002546
2547 idx = GEM_STATS_LEN;
2548 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
2549 for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat)
2550 bp->ethtool_stats[idx++] = *stat;
Jamie Ilesa494ed82011-03-09 16:26:35 +00002551}
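
/* Sketch (not driver code): how one 64-bit octet counter is assembled
 * from the two 32-bit halves read above; 'lo' and 'hi' stand for the
 * GEM_OCT*L and GEM_OCT*H register values.
 */
static inline u64 __maybe_unused gem_octets_example(u32 lo, u32 hi)
{
	return (u64)lo | ((u64)hi << 32);
}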
2552
2553static struct net_device_stats *gem_get_stats(struct macb *bp)
2554{
2555 struct gem_stats *hwstat = &bp->hw_stats.gem;
Tobias Klauser5f1d3a52017-04-07 10:17:30 +02002556 struct net_device_stats *nstat = &bp->dev->stats;
Jamie Ilesa494ed82011-03-09 16:26:35 +00002557
2558 gem_update_stats(bp);
2559
2560 nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
2561 hwstat->rx_alignment_errors +
2562 hwstat->rx_resource_errors +
2563 hwstat->rx_overruns +
2564 hwstat->rx_oversize_frames +
2565 hwstat->rx_jabbers +
2566 hwstat->rx_undersized_frames +
2567 hwstat->rx_length_field_frame_errors);
2568 nstat->tx_errors = (hwstat->tx_late_collisions +
2569 hwstat->tx_excessive_collisions +
2570 hwstat->tx_underrun +
2571 hwstat->tx_carrier_sense_errors);
2572 nstat->multicast = hwstat->rx_multicast_frames;
2573 nstat->collisions = (hwstat->tx_single_collision_frames +
2574 hwstat->tx_multiple_collision_frames +
2575 hwstat->tx_excessive_collisions);
2576 nstat->rx_length_errors = (hwstat->rx_oversize_frames +
2577 hwstat->rx_jabbers +
2578 hwstat->rx_undersized_frames +
2579 hwstat->rx_length_field_frame_errors);
2580 nstat->rx_over_errors = hwstat->rx_resource_errors;
2581 nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
2582 nstat->rx_frame_errors = hwstat->rx_alignment_errors;
2583 nstat->rx_fifo_errors = hwstat->rx_overruns;
2584 nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
2585 nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
2586 nstat->tx_fifo_errors = hwstat->tx_underrun;
2587
2588 return nstat;
2589}
2590
Xander Huff3ff13f12015-01-13 16:15:51 -06002591static void gem_get_ethtool_stats(struct net_device *dev,
2592 struct ethtool_stats *stats, u64 *data)
2593{
2594 struct macb *bp;
2595
2596 bp = netdev_priv(dev);
2597 gem_update_stats(bp);
Rafal Ozieblo512286b2017-11-30 18:19:56 +00002598 memcpy(data, &bp->ethtool_stats, sizeof(u64)
2599 * (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES));
Xander Huff3ff13f12015-01-13 16:15:51 -06002600}
2601
2602static int gem_get_sset_count(struct net_device *dev, int sset)
2603{
Rafal Ozieblo512286b2017-11-30 18:19:56 +00002604 struct macb *bp = netdev_priv(dev);
2605
Xander Huff3ff13f12015-01-13 16:15:51 -06002606 switch (sset) {
2607 case ETH_SS_STATS:
Rafal Ozieblo512286b2017-11-30 18:19:56 +00002608 return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN;
Xander Huff3ff13f12015-01-13 16:15:51 -06002609 default:
2610 return -EOPNOTSUPP;
2611 }
2612}
2613
2614static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
2615{
Rafal Ozieblo512286b2017-11-30 18:19:56 +00002616 char stat_string[ETH_GSTRING_LEN];
2617 struct macb *bp = netdev_priv(dev);
2618 struct macb_queue *queue;
Andy Shevchenko8bcbf822015-07-24 21:24:02 +03002619 unsigned int i;
Rafal Ozieblo512286b2017-11-30 18:19:56 +00002620 unsigned int q;
Xander Huff3ff13f12015-01-13 16:15:51 -06002621
2622 switch (sset) {
2623 case ETH_SS_STATS:
2624 for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
2625 memcpy(p, gem_statistics[i].stat_string,
2626 ETH_GSTRING_LEN);
Rafal Ozieblo512286b2017-11-30 18:19:56 +00002627
2628 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2629 for (i = 0; i < QUEUE_STATS_LEN; i++, p += ETH_GSTRING_LEN) {
2630 snprintf(stat_string, ETH_GSTRING_LEN, "q%d_%s",
2631 q, queue_statistics[i].stat_string);
2632 memcpy(p, stat_string, ETH_GSTRING_LEN);
2633 }
2634 }
Xander Huff3ff13f12015-01-13 16:15:51 -06002635 break;
2636 }
2637}
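
/* The per-queue strings built above follow a "q<N>_<stat>" pattern,
 * e.g. a "q0_" prefix for queue 0; this is what "ethtool -S" prints
 * after the global GEM statistics.
 */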
2638
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01002639static struct net_device_stats *macb_get_stats(struct net_device *dev)
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002640{
2641 struct macb *bp = netdev_priv(dev);
Tobias Klauser5f1d3a52017-04-07 10:17:30 +02002642 struct net_device_stats *nstat = &bp->dev->stats;
Jamie Ilesa494ed82011-03-09 16:26:35 +00002643 struct macb_stats *hwstat = &bp->hw_stats.macb;
2644
2645 if (macb_is_gem(bp))
2646 return gem_get_stats(bp);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002647
frederic RODO6c36a702007-07-12 19:07:24 +02002648 /* read stats from hardware */
2649 macb_update_stats(bp);
2650
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002651 /* Convert HW stats into netdevice stats */
2652 nstat->rx_errors = (hwstat->rx_fcs_errors +
2653 hwstat->rx_align_errors +
2654 hwstat->rx_resource_errors +
2655 hwstat->rx_overruns +
2656 hwstat->rx_oversize_pkts +
2657 hwstat->rx_jabbers +
2658 hwstat->rx_undersize_pkts +
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002659 hwstat->rx_length_mismatch);
2660 nstat->tx_errors = (hwstat->tx_late_cols +
2661 hwstat->tx_excessive_cols +
2662 hwstat->tx_underruns +
Wolfgang Steinwender716723c2015-04-10 11:42:56 +02002663 hwstat->tx_carrier_errors +
2664 hwstat->sqe_test_errors);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002665 nstat->collisions = (hwstat->tx_single_cols +
2666 hwstat->tx_multiple_cols +
2667 hwstat->tx_excessive_cols);
2668 nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
2669 hwstat->rx_jabbers +
2670 hwstat->rx_undersize_pkts +
2671 hwstat->rx_length_mismatch);
Alexander Steinb19f7f72011-04-13 05:03:24 +00002672 nstat->rx_over_errors = hwstat->rx_resource_errors +
2673 hwstat->rx_overruns;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002674 nstat->rx_crc_errors = hwstat->rx_fcs_errors;
2675 nstat->rx_frame_errors = hwstat->rx_align_errors;
2676 nstat->rx_fifo_errors = hwstat->rx_overruns;
2677 /* XXX: What does "missed" mean? */
2678 nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
2679 nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
2680 nstat->tx_fifo_errors = hwstat->tx_underruns;
2681 /* Don't know about heartbeat or window errors... */
2682
2683 return nstat;
2684}
2685
Nicolas Ferred1d1b532012-10-31 06:04:56 +00002686static int macb_get_regs_len(struct net_device *netdev)
2687{
2688 return MACB_GREGS_NBR * sizeof(u32);
2689}
2690
2691static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
2692 void *p)
2693{
2694 struct macb *bp = netdev_priv(dev);
2695 unsigned int tail, head;
2696 u32 *regs_buff = p;
2697
2698 regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
2699 | MACB_GREGS_VERSION;
2700
Zach Brownb410d132016-10-19 09:56:57 -05002701 tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail);
2702 head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head);
Nicolas Ferred1d1b532012-10-31 06:04:56 +00002703
2704 regs_buff[0] = macb_readl(bp, NCR);
2705 regs_buff[1] = macb_or_gem_readl(bp, NCFGR);
2706 regs_buff[2] = macb_readl(bp, NSR);
2707 regs_buff[3] = macb_readl(bp, TSR);
2708 regs_buff[4] = macb_readl(bp, RBQP);
2709 regs_buff[5] = macb_readl(bp, TBQP);
2710 regs_buff[6] = macb_readl(bp, RSR);
2711 regs_buff[7] = macb_readl(bp, IMR);
2712
2713 regs_buff[8] = tail;
2714 regs_buff[9] = head;
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002715 regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
2716 regs_buff[11] = macb_tx_dma(&bp->queues[0], head);
Nicolas Ferred1d1b532012-10-31 06:04:56 +00002717
Neil Armstrongce721a72016-01-05 14:39:16 +01002718 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
2719 regs_buff[12] = macb_or_gem_readl(bp, USRIO);
Moritz Fischer64ec42f2016-03-29 19:11:12 -07002720 if (macb_is_gem(bp))
Nicolas Ferred1d1b532012-10-31 06:04:56 +00002721 regs_buff[13] = gem_readl(bp, DMACFG);
Nicolas Ferred1d1b532012-10-31 06:04:56 +00002722}
2723
Sergio Prado3e2a5e12016-02-09 12:07:16 -02002724static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2725{
2726 struct macb *bp = netdev_priv(netdev);
2727
2728 wol->supported = 0;
2729 wol->wolopts = 0;
2730
2731 if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
2732 wol->supported = WAKE_MAGIC;
2733
2734 if (bp->wol & MACB_WOL_ENABLED)
2735 wol->wolopts |= WAKE_MAGIC;
2736 }
2737}
2738
2739static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2740{
2741 struct macb *bp = netdev_priv(netdev);
2742
2743 if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
2744 (wol->wolopts & ~WAKE_MAGIC))
2745 return -EOPNOTSUPP;
2746
2747 if (wol->wolopts & WAKE_MAGIC)
2748 bp->wol |= MACB_WOL_ENABLED;
2749 else
2750 bp->wol &= ~MACB_WOL_ENABLED;
2751
2752 device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);
2753
2754 return 0;
2755}
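
/* Usage sketch (hypothetical caller, not part of the driver): the
 * programmatic equivalent of "ethtool -s <iface> wol g". It succeeds
 * only when the magic-packet capability was detected, per the checks
 * above.
 */
static int __maybe_unused macb_wol_example(struct net_device *netdev)
{
	struct ethtool_wolinfo wol = {
		.cmd = ETHTOOL_SWOL,
		.wolopts = WAKE_MAGIC,
	};

	return macb_set_wol(netdev, &wol);
}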
2756
Zach Brown8441bb32016-10-19 09:56:58 -05002757static void macb_get_ringparam(struct net_device *netdev,
2758 struct ethtool_ringparam *ring)
2759{
2760 struct macb *bp = netdev_priv(netdev);
2761
2762 ring->rx_max_pending = MAX_RX_RING_SIZE;
2763 ring->tx_max_pending = MAX_TX_RING_SIZE;
2764
2765 ring->rx_pending = bp->rx_ring_size;
2766 ring->tx_pending = bp->tx_ring_size;
2767}
2768
2769static int macb_set_ringparam(struct net_device *netdev,
2770 struct ethtool_ringparam *ring)
2771{
2772 struct macb *bp = netdev_priv(netdev);
2773 u32 new_rx_size, new_tx_size;
2774 unsigned int reset = 0;
2775
2776 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
2777 return -EINVAL;
2778
2779 new_rx_size = clamp_t(u32, ring->rx_pending,
2780 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE);
2781 new_rx_size = roundup_pow_of_two(new_rx_size);
2782
2783 new_tx_size = clamp_t(u32, ring->tx_pending,
2784 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE);
2785 new_tx_size = roundup_pow_of_two(new_tx_size);
2786
2787 if ((new_tx_size == bp->tx_ring_size) &&
2788 (new_rx_size == bp->rx_ring_size)) {
2789 /* nothing to do */
2790 return 0;
2791 }
2792
2793 if (netif_running(bp->dev)) {
2794 reset = 1;
2795 macb_close(bp->dev);
2796 }
2797
2798 bp->rx_ring_size = new_rx_size;
2799 bp->tx_ring_size = new_tx_size;
2800
2801 if (reset)
2802 macb_open(bp->dev);
2803
2804 return 0;
2805}
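
/* Worked example (sketch): a request for 1000 RX descriptors is first
 * clamped to [MIN_RX_RING_SIZE, MAX_RX_RING_SIZE] and then rounded up
 * to the next power of two, exactly as done above.
 */
static inline u32 __maybe_unused macb_ring_size_example(u32 requested)
{
	u32 size = clamp_t(u32, requested, MIN_RX_RING_SIZE, MAX_RX_RING_SIZE);

	return roundup_pow_of_two(size);	/* e.g. 1000 -> 1024 */
}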
2806
Rafal Oziebloab91f0a2017-06-29 07:14:16 +01002807#ifdef CONFIG_MACB_USE_HWSTAMP
2808static unsigned int gem_get_tsu_rate(struct macb *bp)
2809{
2810 struct clk *tsu_clk;
2811 unsigned int tsu_rate;
2812
2813 tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk");
2814 if (!IS_ERR(tsu_clk))
2815 tsu_rate = clk_get_rate(tsu_clk);
2816 /* try pclk instead */
2817 else if (!IS_ERR(bp->pclk)) {
2818 tsu_clk = bp->pclk;
2819 tsu_rate = clk_get_rate(tsu_clk);
2820 } else
2821 return -ENOTSUPP;
2822 return tsu_rate;
2823}
2824
2825static s32 gem_get_ptp_max_adj(void)
2826{
2827 return 64000000;
2828}
2829
2830static int gem_get_ts_info(struct net_device *dev,
2831 struct ethtool_ts_info *info)
2832{
2833 struct macb *bp = netdev_priv(dev);
2834
2835 if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) {
2836 ethtool_op_get_ts_info(dev, info);
2837 return 0;
2838 }
2839
2840 info->so_timestamping =
2841 SOF_TIMESTAMPING_TX_SOFTWARE |
2842 SOF_TIMESTAMPING_RX_SOFTWARE |
2843 SOF_TIMESTAMPING_SOFTWARE |
2844 SOF_TIMESTAMPING_TX_HARDWARE |
2845 SOF_TIMESTAMPING_RX_HARDWARE |
2846 SOF_TIMESTAMPING_RAW_HARDWARE;
2847 info->tx_types =
2848 (1 << HWTSTAMP_TX_ONESTEP_SYNC) |
2849 (1 << HWTSTAMP_TX_OFF) |
2850 (1 << HWTSTAMP_TX_ON);
2851 info->rx_filters =
2852 (1 << HWTSTAMP_FILTER_NONE) |
2853 (1 << HWTSTAMP_FILTER_ALL);
2854
2855 info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1;
2856
2857 return 0;
2858}
2859
2860static struct macb_ptp_info gem_ptp_info = {
2861 .ptp_init = gem_ptp_init,
2862 .ptp_remove = gem_ptp_remove,
2863 .get_ptp_max_adj = gem_get_ptp_max_adj,
2864 .get_tsu_rate = gem_get_tsu_rate,
2865 .get_ts_info = gem_get_ts_info,
2866 .get_hwtst = gem_get_hwtst,
2867 .set_hwtst = gem_set_hwtst,
2868};
2869#endif
2870
Andrei.Pistirica@microchip.comc2594d82017-01-19 17:56:15 +02002871static int macb_get_ts_info(struct net_device *netdev,
2872 struct ethtool_ts_info *info)
2873{
2874 struct macb *bp = netdev_priv(netdev);
2875
2876 if (bp->ptp_info)
2877 return bp->ptp_info->get_ts_info(netdev, info);
2878
2879 return ethtool_op_get_ts_info(netdev, info);
2880}
2881
Rafal Oziebloae8223de2017-11-30 18:20:44 +00002882static void gem_enable_flow_filters(struct macb *bp, bool enable)
2883{
2884 struct ethtool_rx_fs_item *item;
2885 u32 t2_scr;
2886 int num_t2_scr;
2887
2888 num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8));
2889
2890 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
2891 struct ethtool_rx_flow_spec *fs = &item->fs;
2892 struct ethtool_tcpip4_spec *tp4sp_m;
2893
2894 if (fs->location >= num_t2_scr)
2895 continue;
2896
2897 t2_scr = gem_readl_n(bp, SCRT2, fs->location);
2898
2899 /* enable/disable screener regs for the flow entry */
2900 t2_scr = GEM_BFINS(ETHTEN, enable, t2_scr);
2901
2902 /* only enable fields with no masking */
2903 tp4sp_m = &(fs->m_u.tcp_ip4_spec);
2904
2905 if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF))
2906 t2_scr = GEM_BFINS(CMPAEN, 1, t2_scr);
2907 else
2908 t2_scr = GEM_BFINS(CMPAEN, 0, t2_scr);
2909
2910 if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF))
2911 t2_scr = GEM_BFINS(CMPBEN, 1, t2_scr);
2912 else
2913 t2_scr = GEM_BFINS(CMPBEN, 0, t2_scr);
2914
2915 if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)))
2916 t2_scr = GEM_BFINS(CMPCEN, 1, t2_scr);
2917 else
2918 t2_scr = GEM_BFINS(CMPCEN, 0, t2_scr);
2919
2920 gem_writel_n(bp, SCRT2, fs->location, t2_scr);
2921 }
2922}
2923
2924static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs)
2925{
2926 struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m;
2927 uint16_t index = fs->location;
2928 u32 w0, w1, t2_scr;
2929 bool cmp_a = false;
2930 bool cmp_b = false;
2931 bool cmp_c = false;
2932
2933 tp4sp_v = &(fs->h_u.tcp_ip4_spec);
2934 tp4sp_m = &(fs->m_u.tcp_ip4_spec);
2935
2936 /* ignore field if any masking set */
2937 if (tp4sp_m->ip4src == 0xFFFFFFFF) {
2938 /* 1st compare reg - IP source address */
2939 w0 = 0;
2940 w1 = 0;
2941 w0 = tp4sp_v->ip4src;
2942 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
2943 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
2944 w1 = GEM_BFINS(T2OFST, ETYPE_SRCIP_OFFSET, w1);
2945 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0);
2946 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1);
2947 cmp_a = true;
2948 }
2949
2950 /* ignore field if any masking set */
2951 if (tp4sp_m->ip4dst == 0xFFFFFFFF) {
2952 /* 2nd compare reg - IP destination address */
2953 w0 = 0;
2954 w1 = 0;
2955 w0 = tp4sp_v->ip4dst;
2956 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
2957 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
2958 w1 = GEM_BFINS(T2OFST, ETYPE_DSTIP_OFFSET, w1);
2959 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0);
2960 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1);
2961 cmp_b = true;
2962 }
2963
2964 /* ignore both port fields if masking set in both */
2965 if ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) {
2966 /* 3rd compare reg - source port, destination port */
2967 w0 = 0;
2968 w1 = 0;
2969 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_IPHDR, w1);
2970 if (tp4sp_m->psrc == tp4sp_m->pdst) {
2971 w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0);
2972 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
2973 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
2974 w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
2975 } else {
2976 /* only one port definition */
2977 w1 = GEM_BFINS(T2DISMSK, 0, w1); /* 16-bit compare */
2978 w0 = GEM_BFINS(T2MASK, 0xFFFF, w0);
2979 if (tp4sp_m->psrc == 0xFFFF) { /* src port */
2980 w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0);
2981 w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
2982 } else { /* dst port */
2983 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
2984 w1 = GEM_BFINS(T2OFST, IPHDR_DSTPORT_OFFSET, w1);
2985 }
2986 }
2987 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0);
2988 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1);
2989 cmp_c = true;
2990 }
2991
2992 t2_scr = 0;
2993 t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr);
2994 t2_scr = GEM_BFINS(ETHT2IDX, SCRT2_ETHT, t2_scr);
2995 if (cmp_a)
2996 t2_scr = GEM_BFINS(CMPA, GEM_IP4SRC_CMP(index), t2_scr);
2997 if (cmp_b)
2998 t2_scr = GEM_BFINS(CMPB, GEM_IP4DST_CMP(index), t2_scr);
2999 if (cmp_c)
3000 t2_scr = GEM_BFINS(CMPC, GEM_PORT_CMP(index), t2_scr);
3001 gem_writel_n(bp, SCRT2, index, t2_scr);
3002}
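
/* Illustrative flow spec (sketch, hypothetical values): what a caller of
 * gem_add_flow_filter() below might pass in order to steer TCP/IPv4
 * frames from 192.168.1.1 to RX queue 1 via screener slot 0. Only fields
 * with an all-ones mask are programmed into the compare registers above.
 */
static void __maybe_unused gem_flow_spec_example(struct ethtool_rx_flow_spec *fs)
{
	memset(fs, 0, sizeof(*fs));
	fs->flow_type = TCP_V4_FLOW;
	fs->h_u.tcp_ip4_spec.ip4src = htonl(0xc0a80101);	/* 192.168.1.1 */
	fs->m_u.tcp_ip4_spec.ip4src = 0xFFFFFFFF;		/* exact match */
	fs->ring_cookie = 1;					/* RX queue 1 */
	fs->location = 0;					/* screener slot 0 */
}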
3003
3004static int gem_add_flow_filter(struct net_device *netdev,
3005 struct ethtool_rxnfc *cmd)
3006{
3007 struct macb *bp = netdev_priv(netdev);
3008 struct ethtool_rx_flow_spec *fs = &cmd->fs;
3009 struct ethtool_rx_fs_item *item, *newfs;
Julia Cartwright7038cdb2017-12-05 18:02:49 -06003010 unsigned long flags;
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003011 int ret = -EINVAL;
3012 bool added = false;
3013
Julia Cartwrightcc1674e2017-12-05 18:02:50 -06003014 newfs = kmalloc(sizeof(*newfs), GFP_KERNEL);
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003015 if (newfs == NULL)
3016 return -ENOMEM;
3017 memcpy(&newfs->fs, fs, sizeof(newfs->fs));
3018
3019 netdev_dbg(netdev,
3020 "Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
3021 fs->flow_type, (int)fs->ring_cookie, fs->location,
3022 htonl(fs->h_u.tcp_ip4_spec.ip4src),
3023 htonl(fs->h_u.tcp_ip4_spec.ip4dst),
3024 htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst));
3025
Julia Cartwright7038cdb2017-12-05 18:02:49 -06003026 spin_lock_irqsave(&bp->rx_fs_lock, flags);
3027
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003028 /* find correct place to add in list */
Julia Cartwrighta3da8ad2017-12-05 18:02:48 -06003029 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3030 if (item->fs.location > newfs->fs.location) {
3031 list_add_tail(&newfs->list, &item->list);
3032 added = true;
3033 break;
3034 } else if (item->fs.location == fs->location) {
3035 netdev_err(netdev, "Rule not added: location %d not free!\n",
3036 fs->location);
3037 ret = -EBUSY;
3038 goto err;
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003039 }
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003040 }
Julia Cartwrighta3da8ad2017-12-05 18:02:48 -06003041 if (!added)
3042 list_add_tail(&newfs->list, &bp->rx_fs_list.list);
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003043
3044 gem_prog_cmp_regs(bp, fs);
3045 bp->rx_fs_list.count++;
3046 /* enable filtering if NTUPLE on */
3047 if (netdev->features & NETIF_F_NTUPLE)
3048 gem_enable_flow_filters(bp, 1);
3049
Julia Cartwright7038cdb2017-12-05 18:02:49 -06003050 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003051 return 0;
3052
3053err:
Julia Cartwright7038cdb2017-12-05 18:02:49 -06003054 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003055 kfree(newfs);
3056 return ret;
3057}
3058
3059static int gem_del_flow_filter(struct net_device *netdev,
3060 struct ethtool_rxnfc *cmd)
3061{
3062 struct macb *bp = netdev_priv(netdev);
3063 struct ethtool_rx_fs_item *item;
3064 struct ethtool_rx_flow_spec *fs;
Julia Cartwright7038cdb2017-12-05 18:02:49 -06003065 unsigned long flags;
3066
3067 spin_lock_irqsave(&bp->rx_fs_lock, flags);
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003068
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003069 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3070 if (item->fs.location == cmd->fs.location) {
3071 /* disable screener regs for the flow entry */
3072 fs = &(item->fs);
3073 netdev_dbg(netdev,
3074 "Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
3075 fs->flow_type, (int)fs->ring_cookie, fs->location,
3076 htonl(fs->h_u.tcp_ip4_spec.ip4src),
3077 htonl(fs->h_u.tcp_ip4_spec.ip4dst),
3078 htons(fs->h_u.tcp_ip4_spec.psrc),
3079 htons(fs->h_u.tcp_ip4_spec.pdst));
3080
3081 gem_writel_n(bp, SCRT2, fs->location, 0);
3082
3083 list_del(&item->list);
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003084 bp->rx_fs_list.count--;
Julia Cartwright7038cdb2017-12-05 18:02:49 -06003085 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
3086 kfree(item);
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003087 return 0;
3088 }
3089 }
3090
Julia Cartwright7038cdb2017-12-05 18:02:49 -06003091 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003092 return -EINVAL;
3093}
3094
3095static int gem_get_flow_entry(struct net_device *netdev,
3096 struct ethtool_rxnfc *cmd)
3097{
3098 struct macb *bp = netdev_priv(netdev);
3099 struct ethtool_rx_fs_item *item;
3100
3101 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3102 if (item->fs.location == cmd->fs.location) {
3103 memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs));
3104 return 0;
3105 }
3106 }
3107 return -EINVAL;
3108}
3109
3110static int gem_get_all_flow_entries(struct net_device *netdev,
3111 struct ethtool_rxnfc *cmd, u32 *rule_locs)
3112{
3113 struct macb *bp = netdev_priv(netdev);
3114 struct ethtool_rx_fs_item *item;
3115 uint32_t cnt = 0;
3116
3117 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3118 if (cnt == cmd->rule_cnt)
3119 return -EMSGSIZE;
3120 rule_locs[cnt] = item->fs.location;
3121 cnt++;
3122 }
3123 cmd->data = bp->max_tuples;
3124 cmd->rule_cnt = cnt;
3125
3126 return 0;
3127}
3128
3129static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
3130 u32 *rule_locs)
3131{
3132 struct macb *bp = netdev_priv(netdev);
3133 int ret = 0;
3134
3135 switch (cmd->cmd) {
3136 case ETHTOOL_GRXRINGS:
3137 cmd->data = bp->num_queues;
3138 break;
3139 case ETHTOOL_GRXCLSRLCNT:
3140 cmd->rule_cnt = bp->rx_fs_list.count;
3141 break;
3142 case ETHTOOL_GRXCLSRULE:
3143 ret = gem_get_flow_entry(netdev, cmd);
3144 break;
3145 case ETHTOOL_GRXCLSRLALL:
3146 ret = gem_get_all_flow_entries(netdev, cmd, rule_locs);
3147 break;
3148 default:
3149 netdev_err(netdev,
3150 "Command parameter %d is not supported\n", cmd->cmd);
3151 ret = -EOPNOTSUPP;
3152 }
3153
3154 return ret;
3155}
3156
3157static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
3158{
3159 struct macb *bp = netdev_priv(netdev);
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003160 int ret;
3161
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003162 switch (cmd->cmd) {
3163 case ETHTOOL_SRXCLSRLINS:
3164 if ((cmd->fs.location >= bp->max_tuples)
3165 || (cmd->fs.ring_cookie >= bp->num_queues)) {
3166 ret = -EINVAL;
3167 break;
3168 }
3169 ret = gem_add_flow_filter(netdev, cmd);
3170 break;
3171 case ETHTOOL_SRXCLSRLDEL:
3172 ret = gem_del_flow_filter(netdev, cmd);
3173 break;
3174 default:
3175 netdev_err(netdev,
3176 "Command parameter %d is not supported\n", cmd->cmd);
3177 ret = -EOPNOTSUPP;
3178 }
3179
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003180 return ret;
3181}
3182
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003183static const struct ethtool_ops macb_ethtool_ops = {
Nicolas Ferred1d1b532012-10-31 06:04:56 +00003184 .get_regs_len = macb_get_regs_len,
3185 .get_regs = macb_get_regs,
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01003186 .get_link = ethtool_op_get_link,
Richard Cochran17f393e2012-04-03 22:59:31 +00003187 .get_ts_info = ethtool_op_get_ts_info,
Sergio Prado3e2a5e12016-02-09 12:07:16 -02003188 .get_wol = macb_get_wol,
3189 .set_wol = macb_set_wol,
Philippe Reynes176275a2016-06-22 00:32:36 +02003190 .get_link_ksettings = phy_ethtool_get_link_ksettings,
3191 .set_link_ksettings = phy_ethtool_set_link_ksettings,
Zach Brown8441bb32016-10-19 09:56:58 -05003192 .get_ringparam = macb_get_ringparam,
3193 .set_ringparam = macb_set_ringparam,
Xander Huff8cd5a562015-01-15 15:55:20 -06003194};
Xander Huff8cd5a562015-01-15 15:55:20 -06003195
Lad, Prabhakar8093b1c2015-02-05 16:21:07 +00003196static const struct ethtool_ops gem_ethtool_ops = {
Xander Huff8cd5a562015-01-15 15:55:20 -06003197 .get_regs_len = macb_get_regs_len,
3198 .get_regs = macb_get_regs,
3199 .get_link = ethtool_op_get_link,
Andrei.Pistirica@microchip.comc2594d82017-01-19 17:56:15 +02003200 .get_ts_info = macb_get_ts_info,
Xander Huff3ff13f12015-01-13 16:15:51 -06003201 .get_ethtool_stats = gem_get_ethtool_stats,
3202 .get_strings = gem_get_ethtool_strings,
3203 .get_sset_count = gem_get_sset_count,
Philippe Reynes176275a2016-06-22 00:32:36 +02003204 .get_link_ksettings = phy_ethtool_get_link_ksettings,
3205 .set_link_ksettings = phy_ethtool_set_link_ksettings,
Zach Brown8441bb32016-10-19 09:56:58 -05003206 .get_ringparam = macb_get_ringparam,
3207 .set_ringparam = macb_set_ringparam,
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003208 .get_rxnfc = gem_get_rxnfc,
3209 .set_rxnfc = gem_set_rxnfc,
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01003210};
3211
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003212static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01003213{
Philippe Reynes0a912812016-06-22 00:32:35 +02003214 struct phy_device *phydev = dev->phydev;
Andrei.Pistirica@microchip.comc2594d82017-01-19 17:56:15 +02003215 struct macb *bp = netdev_priv(dev);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01003216
3217 if (!netif_running(dev))
3218 return -EINVAL;
3219
frederic RODO6c36a702007-07-12 19:07:24 +02003220 if (!phydev)
3221 return -ENODEV;
3222
Andrei.Pistirica@microchip.comc2594d82017-01-19 17:56:15 +02003223 if (!bp->ptp_info)
3224 return phy_mii_ioctl(phydev, rq, cmd);
3225
3226 switch (cmd) {
3227 case SIOCSHWTSTAMP:
3228 return bp->ptp_info->set_hwtst(dev, rq, cmd);
3229 case SIOCGHWTSTAMP:
3230 return bp->ptp_info->get_hwtst(dev, rq);
3231 default:
3232 return phy_mii_ioctl(phydev, rq, cmd);
3233 }
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01003234}
3235
Cyrille Pitchen85ff3d82014-07-24 13:51:00 +02003236static int macb_set_features(struct net_device *netdev,
3237 netdev_features_t features)
3238{
3239 struct macb *bp = netdev_priv(netdev);
3240 netdev_features_t changed = features ^ netdev->features;
3241
3242 /* TX checksum offload */
3243 if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) {
3244 u32 dmacfg;
3245
3246 dmacfg = gem_readl(bp, DMACFG);
3247 if (features & NETIF_F_HW_CSUM)
3248 dmacfg |= GEM_BIT(TXCOEN);
3249 else
3250 dmacfg &= ~GEM_BIT(TXCOEN);
3251 gem_writel(bp, DMACFG, dmacfg);
3252 }
3253
Cyrille Pitchen924ec532014-07-24 13:51:01 +02003254 /* RX checksum offload */
3255 if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) {
3256 u32 netcfg;
3257
3258 netcfg = gem_readl(bp, NCFGR);
3259 if (features & NETIF_F_RXCSUM &&
3260 !(netdev->flags & IFF_PROMISC))
3261 netcfg |= GEM_BIT(RXCOEN);
3262 else
3263 netcfg &= ~GEM_BIT(RXCOEN);
3264 gem_writel(bp, NCFGR, netcfg);
3265 }
3266
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003267 /* RX Flow Filters */
3268 if ((changed & NETIF_F_NTUPLE) && macb_is_gem(bp)) {
3269 bool turn_on = features & NETIF_F_NTUPLE;
3270
3271 gem_enable_flow_filters(bp, turn_on);
3272 }
Cyrille Pitchen85ff3d82014-07-24 13:51:00 +02003273 return 0;
3274}
3275
Alexander Beregalov5f1fa992009-04-11 07:42:26 +00003276static const struct net_device_ops macb_netdev_ops = {
3277 .ndo_open = macb_open,
3278 .ndo_stop = macb_close,
3279 .ndo_start_xmit = macb_start_xmit,
Jiri Pirkoafc4b132011-08-16 06:29:01 +00003280 .ndo_set_rx_mode = macb_set_rx_mode,
Alexander Beregalov5f1fa992009-04-11 07:42:26 +00003281 .ndo_get_stats = macb_get_stats,
3282 .ndo_do_ioctl = macb_ioctl,
3283 .ndo_validate_addr = eth_validate_addr,
Harini Katakama5898ea2015-05-06 22:27:18 +05303284 .ndo_change_mtu = macb_change_mtu,
Alexander Beregalov5f1fa992009-04-11 07:42:26 +00003285 .ndo_set_mac_address = eth_mac_addr,
Thomas Petazzoni6e8cf5c2009-05-04 11:08:41 -07003286#ifdef CONFIG_NET_POLL_CONTROLLER
3287 .ndo_poll_controller = macb_poll_controller,
3288#endif
Cyrille Pitchen85ff3d82014-07-24 13:51:00 +02003289 .ndo_set_features = macb_set_features,
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00003290 .ndo_features_check = macb_features_check,
Alexander Beregalov5f1fa992009-04-11 07:42:26 +00003291};
3292
Moritz Fischer64ec42f2016-03-29 19:11:12 -07003293/* Configure peripheral capabilities according to device tree
Nicolas Ferree1755872014-07-24 13:50:58 +02003294 * and integration options used
3295 */
Moritz Fischer64ec42f2016-03-29 19:11:12 -07003296static void macb_configure_caps(struct macb *bp,
3297 const struct macb_config *dt_conf)
Nicolas Ferree1755872014-07-24 13:50:58 +02003298{
3299 u32 dcfg;
Nicolas Ferree1755872014-07-24 13:50:58 +02003300
Nicolas Ferref6970502015-03-31 15:02:01 +02003301 if (dt_conf)
3302 bp->caps = dt_conf->caps;
3303
Andy Shevchenkof2ce8a9e2015-07-24 21:23:59 +03003304 if (hw_is_gem(bp->regs, bp->native_io)) {
Nicolas Ferree1755872014-07-24 13:50:58 +02003305 bp->caps |= MACB_CAPS_MACB_IS_GEM;
3306
Nicolas Ferree1755872014-07-24 13:50:58 +02003307 dcfg = gem_readl(bp, DCFG1);
3308 if (GEM_BFEXT(IRQCOR, dcfg) == 0)
3309 bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
3310 dcfg = gem_readl(bp, DCFG2);
3311 if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
3312 bp->caps |= MACB_CAPS_FIFO_MODE;
Rafal Oziebloab91f0a2017-06-29 07:14:16 +01003313#ifdef CONFIG_MACB_USE_HWSTAMP
3314 if (gem_has_ptp(bp)) {
Rafal Ozieblo7b429612017-06-29 07:12:51 +01003315 if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5)))
3316 pr_err("GEM doesn't support hardware ptp.\n");
Rafal Oziebloab91f0a2017-06-29 07:14:16 +01003317 else {
Rafal Ozieblo7b429612017-06-29 07:12:51 +01003318 bp->hw_dma_cap |= HW_DMA_CAP_PTP;
Rafal Oziebloab91f0a2017-06-29 07:14:16 +01003319 bp->ptp_info = &gem_ptp_info;
3320 }
Rafal Ozieblo7b429612017-06-29 07:12:51 +01003321 }
Rafal Oziebloab91f0a2017-06-29 07:14:16 +01003322#endif
Nicolas Ferree1755872014-07-24 13:50:58 +02003323 }
3324
Andy Shevchenkoa35919e2015-07-24 21:24:01 +03003325 dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
Nicolas Ferree1755872014-07-24 13:50:58 +02003326}
3327
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003328static void macb_probe_queues(void __iomem *mem,
Andy Shevchenkof2ce8a9e2015-07-24 21:23:59 +03003329 bool native_io,
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003330 unsigned int *queue_mask,
3331 unsigned int *num_queues)
3332{
3333 unsigned int hw_q;
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003334
3335 *queue_mask = 0x1;
3336 *num_queues = 1;
3337
Nicolas Ferreda120112015-03-31 15:02:00 +02003338	/* is it macb or gem?
3339 *
3340 * We need to read directly from the hardware here because
3341 * we are early in the probe process and don't have the
 3342	 * MACB_CAPS_MACB_IS_GEM flag set yet
3343 */
Andy Shevchenkof2ce8a9e2015-07-24 21:23:59 +03003344 if (!hw_is_gem(mem, native_io))
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003345 return;
3346
3347 /* bit 0 is never set but queue 0 always exists */
Arun Chandrana50dad32015-02-18 16:59:35 +05303348 *queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff;
3349
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003350 *queue_mask |= 0x1;
3351
3352 for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q)
3353 if (*queue_mask & (1 << hw_q))
3354 (*num_queues)++;
3355}
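
/* Worked example (sketch): if DCFG6[7:0] reads 0x0e, forcing bit 0 gives
 * a queue mask of 0x0f, i.e. four queues (0-3) - the same count the loop
 * above derives by testing each bit.
 */
static inline unsigned int __maybe_unused macb_queue_count_example(u32 dcfg6)
{
	return hweight32((dcfg6 & 0xff) | 0x1);	/* e.g. 0x0e -> 4 */
}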
3356
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003357static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
shubhrajyoti.datta@xilinx.comaead88b2016-08-16 10:14:50 +05303358 struct clk **hclk, struct clk **tx_clk,
Harini Katakamf5473d12019-03-01 16:20:33 +05303359 struct clk **rx_clk, struct clk **tsu_clk)
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003360{
Bartosz Folta83a77e92016-12-14 06:39:15 +00003361 struct macb_platform_data *pdata;
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003362 int err;
3363
Bartosz Folta83a77e92016-12-14 06:39:15 +00003364 pdata = dev_get_platdata(&pdev->dev);
3365 if (pdata) {
3366 *pclk = pdata->pclk;
3367 *hclk = pdata->hclk;
3368 } else {
3369 *pclk = devm_clk_get(&pdev->dev, "pclk");
3370 *hclk = devm_clk_get(&pdev->dev, "hclk");
3371 }
3372
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003373 if (IS_ERR(*pclk)) {
3374 err = PTR_ERR(*pclk);
3375 dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
3376 return err;
3377 }
3378
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003379 if (IS_ERR(*hclk)) {
3380 err = PTR_ERR(*hclk);
3381 dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
3382 return err;
3383 }
3384
3385 *tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
3386 if (IS_ERR(*tx_clk))
3387 *tx_clk = NULL;
3388
shubhrajyoti.datta@xilinx.comaead88b2016-08-16 10:14:50 +05303389 *rx_clk = devm_clk_get(&pdev->dev, "rx_clk");
3390 if (IS_ERR(*rx_clk))
3391 *rx_clk = NULL;
3392
Harini Katakamf5473d12019-03-01 16:20:33 +05303393 *tsu_clk = devm_clk_get(&pdev->dev, "tsu_clk");
3394 if (IS_ERR(*tsu_clk))
3395 *tsu_clk = NULL;
3396
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003397 err = clk_prepare_enable(*pclk);
3398 if (err) {
3399 dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
3400 return err;
3401 }
3402
3403 err = clk_prepare_enable(*hclk);
3404 if (err) {
3405 dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err);
3406 goto err_disable_pclk;
3407 }
3408
3409 err = clk_prepare_enable(*tx_clk);
3410 if (err) {
3411 dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
3412 goto err_disable_hclk;
3413 }
3414
shubhrajyoti.datta@xilinx.comaead88b2016-08-16 10:14:50 +05303415 err = clk_prepare_enable(*rx_clk);
3416 if (err) {
3417 dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err);
3418 goto err_disable_txclk;
3419 }
3420
Harini Katakamf5473d12019-03-01 16:20:33 +05303421 err = clk_prepare_enable(*tsu_clk);
3422 if (err) {
3423 dev_err(&pdev->dev, "failed to enable tsu_clk (%u)\n", err);
3424 goto err_disable_rxclk;
3425 }
3426
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003427 return 0;
3428
Harini Katakamf5473d12019-03-01 16:20:33 +05303429err_disable_rxclk:
3430 clk_disable_unprepare(*rx_clk);
3431
shubhrajyoti.datta@xilinx.comaead88b2016-08-16 10:14:50 +05303432err_disable_txclk:
3433 clk_disable_unprepare(*tx_clk);
3434
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003435err_disable_hclk:
3436 clk_disable_unprepare(*hclk);
3437
3438err_disable_pclk:
3439 clk_disable_unprepare(*pclk);
3440
3441 return err;
3442}
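
/* Clock summary (descriptive): "pclk" and "hclk" are mandatory and probe
 * fails without them, while "tx_clk", "rx_clk" and "tsu_clk" are optional
 * and fall back to NULL; clk_prepare_enable(NULL) is a no-op, so the
 * enable/error-unwind sequence above stays uniform.
 */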
3443
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003444static int macb_init(struct platform_device *pdev)
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01003445{
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003446 struct net_device *dev = platform_get_drvdata(pdev);
Nicolas Ferrebfa09142015-03-31 15:01:59 +02003447 unsigned int hw_q, q;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003448 struct macb *bp = netdev_priv(dev);
3449 struct macb_queue *queue;
3450 int err;
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003451 u32 val, reg;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01003452
Zach Brownb410d132016-10-19 09:56:57 -05003453 bp->tx_ring_size = DEFAULT_TX_RING_SIZE;
3454 bp->rx_ring_size = DEFAULT_RX_RING_SIZE;
3455
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003456	/* set the queue register mapping once and for all: queue0 has a special
 3457	 * register mapping, but we don't want to test the queue index and then
 3458	 * compute the corresponding register offset at run time.
3459 */
Cyrille Pitchencf250de2014-12-15 15:13:32 +01003460 for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
Nicolas Ferrebfa09142015-03-31 15:01:59 +02003461 if (!(bp->queue_mask & (1 << hw_q)))
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003462 continue;
Jamie Iles461845d2011-03-08 20:19:23 +00003463
Cyrille Pitchencf250de2014-12-15 15:13:32 +01003464 queue = &bp->queues[q];
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003465 queue->bp = bp;
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003466 netif_napi_add(dev, &queue->napi, macb_poll, 64);
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003467 if (hw_q) {
3468 queue->ISR = GEM_ISR(hw_q - 1);
3469 queue->IER = GEM_IER(hw_q - 1);
3470 queue->IDR = GEM_IDR(hw_q - 1);
3471 queue->IMR = GEM_IMR(hw_q - 1);
3472 queue->TBQP = GEM_TBQP(hw_q - 1);
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003473 queue->RBQP = GEM_RBQP(hw_q - 1);
3474 queue->RBQS = GEM_RBQS(hw_q - 1);
Harini Katakamfff80192016-08-09 13:15:53 +05303475#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003476 if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
Rafal Ozieblodc97a892017-01-27 15:08:20 +00003477 queue->TBQPH = GEM_TBQPH(hw_q - 1);
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003478 queue->RBQPH = GEM_RBQPH(hw_q - 1);
3479 }
Harini Katakamfff80192016-08-09 13:15:53 +05303480#endif
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003481 } else {
3482 /* queue0 uses legacy registers */
3483 queue->ISR = MACB_ISR;
3484 queue->IER = MACB_IER;
3485 queue->IDR = MACB_IDR;
3486 queue->IMR = MACB_IMR;
3487 queue->TBQP = MACB_TBQP;
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003488 queue->RBQP = MACB_RBQP;
Harini Katakamfff80192016-08-09 13:15:53 +05303489#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003490 if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
Rafal Ozieblodc97a892017-01-27 15:08:20 +00003491 queue->TBQPH = MACB_TBQPH;
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003492 queue->RBQPH = MACB_RBQPH;
3493 }
Harini Katakamfff80192016-08-09 13:15:53 +05303494#endif
Soren Brinkmanne1824df2013-12-10 16:07:23 -08003495 }
Soren Brinkmanne1824df2013-12-10 16:07:23 -08003496
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003497	/* get the IRQ: here we use the Linux queue index, not the hardware
 3498	 * queue index. The queue IRQ definitions in the device tree
 3499	 * must remove the optional gaps that could exist in the
 3500	 * hardware queue mask.
3501 */
Cyrille Pitchencf250de2014-12-15 15:13:32 +01003502 queue->irq = platform_get_irq(pdev, q);
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003503 err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
Punnaiah Choudary Kalluri20488232015-03-06 18:29:12 +01003504 IRQF_SHARED, dev->name, queue);
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003505 if (err) {
3506 dev_err(&pdev->dev,
3507 "Unable to request IRQ %d (error %d)\n",
3508 queue->irq, err);
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003509 return err;
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003510 }
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01003511
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003512 INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
Cyrille Pitchencf250de2014-12-15 15:13:32 +01003513 q++;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01003514 }
3515
Alexander Beregalov5f1fa992009-04-11 07:42:26 +00003516 dev->netdev_ops = &macb_netdev_ops;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01003517
Nicolas Ferre4df95132013-06-04 21:57:12 +00003518	/* set up the appropriate routines according to adapter type */
3519 if (macb_is_gem(bp)) {
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02003520 bp->max_tx_length = GEM_MAX_TX_LEN;
Nicolas Ferre4df95132013-06-04 21:57:12 +00003521 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
3522 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
3523 bp->macbgem_ops.mog_init_rings = gem_init_rings;
3524 bp->macbgem_ops.mog_rx = gem_rx;
Xander Huff8cd5a562015-01-15 15:55:20 -06003525 dev->ethtool_ops = &gem_ethtool_ops;
Nicolas Ferre4df95132013-06-04 21:57:12 +00003526 } else {
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02003527 bp->max_tx_length = MACB_MAX_TX_LEN;
Nicolas Ferre4df95132013-06-04 21:57:12 +00003528 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
3529 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
3530 bp->macbgem_ops.mog_init_rings = macb_init_rings;
3531 bp->macbgem_ops.mog_rx = macb_rx;
Xander Huff8cd5a562015-01-15 15:55:20 -06003532 dev->ethtool_ops = &macb_ethtool_ops;
Nicolas Ferre4df95132013-06-04 21:57:12 +00003533 }
3534
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02003535 /* Set features */
3536 dev->hw_features = NETIF_F_SG;
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00003537
3538 /* Check LSO capability */
3539 if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6)))
3540 dev->hw_features |= MACB_NETIF_LSO;
3541
Cyrille Pitchen85ff3d82014-07-24 13:51:00 +02003542 /* Checksum offload is only available on gem with packet buffer */
3543 if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
Cyrille Pitchen924ec532014-07-24 13:51:01 +02003544 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02003545 if (bp->caps & MACB_CAPS_SG_DISABLED)
3546 dev->hw_features &= ~NETIF_F_SG;
3547 dev->features = dev->hw_features;
3548
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003549 /* Check RX Flow Filters support.
3550 * Max Rx flows set by availability of screeners & compare regs:
3551 * each 4-tuple define requires 1 T2 screener reg + 3 compare regs
3552 */
3553 reg = gem_readl(bp, DCFG8);
3554 bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
3555 GEM_BFEXT(T2SCR, reg));
3556 if (bp->max_tuples > 0) {
3557 /* also needs one ethtype match to check IPv4 */
3558 if (GEM_BFEXT(SCR2ETH, reg) > 0) {
3559 /* program this reg now */
3560 reg = 0;
3561 reg = GEM_BFINS(ETHTCMP, (uint16_t)ETH_P_IP, reg);
3562 gem_writel_n(bp, ETHT, SCRT2_ETHT, reg);
 3563			/* Filtering is supported in hw but left disabled by default */
3564 dev->hw_features |= NETIF_F_NTUPLE;
3565 /* init Rx flow definitions */
3566 INIT_LIST_HEAD(&bp->rx_fs_list.list);
3567 bp->rx_fs_list.count = 0;
3568 spin_lock_init(&bp->rx_fs_lock);
3569 } else
3570 bp->max_tuples = 0;
3571 }
3572
Neil Armstrongce721a72016-01-05 14:39:16 +01003573 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
3574 val = 0;
3575 if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
3576 val = GEM_BIT(RGMII);
3577 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
Nicolas Ferre6bdaa5e2016-03-10 16:44:32 +01003578 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
Neil Armstrongce721a72016-01-05 14:39:16 +01003579 val = MACB_BIT(RMII);
Nicolas Ferre6bdaa5e2016-03-10 16:44:32 +01003580 else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
Neil Armstrongce721a72016-01-05 14:39:16 +01003581 val = MACB_BIT(MII);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01003582
Neil Armstrongce721a72016-01-05 14:39:16 +01003583 if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
3584 val |= MACB_BIT(CLKEN);
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003585
Neil Armstrongce721a72016-01-05 14:39:16 +01003586 macb_or_gem_writel(bp, USRIO, val);
3587 }
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003588
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003589 /* Set MII management clock divider */
3590 val = macb_mdc_clk_div(bp);
3591 val |= macb_dbw(bp);
Punnaiah Choudary Kalluri022be252015-11-18 09:03:50 +05303592 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
3593 val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003594 macb_writel(bp, NCFGR, val);
3595
3596 return 0;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003597}
3598
3599#if defined(CONFIG_OF)
3600/* 1518 rounded up */
3601#define AT91ETHER_MAX_RBUFF_SZ 0x600
3602/* max number of receive buffers */
3603#define AT91ETHER_MAX_RX_DESCR 9
3604
3605/* Initialize and start the Receiver and Transmit subsystems */
3606static int at91ether_start(struct net_device *dev)
3607{
3608 struct macb *lp = netdev_priv(dev);
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003609 struct macb_queue *q = &lp->queues[0];
Rafal Ozieblodc97a892017-01-27 15:08:20 +00003610 struct macb_dma_desc *desc;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003611 dma_addr_t addr;
3612 u32 ctl;
3613 int i;
3614
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003615 q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003616 (AT91ETHER_MAX_RX_DESCR *
Rafal Ozieblodc97a892017-01-27 15:08:20 +00003617 macb_dma_desc_get_size(lp)),
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003618 &q->rx_ring_dma, GFP_KERNEL);
3619 if (!q->rx_ring)
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003620 return -ENOMEM;
3621
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003622 q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003623 AT91ETHER_MAX_RX_DESCR *
3624 AT91ETHER_MAX_RBUFF_SZ,
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003625 &q->rx_buffers_dma, GFP_KERNEL);
3626 if (!q->rx_buffers) {
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003627 dma_free_coherent(&lp->pdev->dev,
3628 AT91ETHER_MAX_RX_DESCR *
Rafal Ozieblodc97a892017-01-27 15:08:20 +00003629 macb_dma_desc_get_size(lp),
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003630 q->rx_ring, q->rx_ring_dma);
3631 q->rx_ring = NULL;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003632 return -ENOMEM;
3633 }
3634
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003635 addr = q->rx_buffers_dma;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003636 for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003637 desc = macb_rx_desc(q, i);
Rafal Ozieblodc97a892017-01-27 15:08:20 +00003638 macb_set_addr(lp, desc, addr);
3639 desc->ctrl = 0;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003640 addr += AT91ETHER_MAX_RBUFF_SZ;
3641 }
3642
3643 /* Set the Wrap bit on the last descriptor */
Rafal Ozieblodc97a892017-01-27 15:08:20 +00003644 desc->addr |= MACB_BIT(RX_WRAP);
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003645
3646 /* Reset buffer index */
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003647 q->rx_tail = 0;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003648
3649 /* Program address of descriptor list in Rx Buffer Queue register */
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003650 macb_writel(lp, RBQP, q->rx_ring_dma);
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003651
3652 /* Enable Receive and Transmit */
3653 ctl = macb_readl(lp, NCR);
3654 macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
3655
3656 return 0;
3657}
3658
3659/* Open the ethernet interface */
3660static int at91ether_open(struct net_device *dev)
3661{
3662 struct macb *lp = netdev_priv(dev);
3663 u32 ctl;
3664 int ret;
3665
3666 /* Clear internal statistics */
3667 ctl = macb_readl(lp, NCR);
3668 macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
3669
3670 macb_set_hwaddr(lp);
3671
3672 ret = at91ether_start(dev);
3673 if (ret)
3674 return ret;
3675
3676 /* Enable MAC interrupts */
3677 macb_writel(lp, IER, MACB_BIT(RCOMP) |
3678 MACB_BIT(RXUBR) |
3679 MACB_BIT(ISR_TUND) |
3680 MACB_BIT(ISR_RLE) |
3681 MACB_BIT(TCOMP) |
3682 MACB_BIT(ISR_ROVR) |
3683 MACB_BIT(HRESP));
3684
3685 /* schedule a link state check */
Philippe Reynes0a912812016-06-22 00:32:35 +02003686 phy_start(dev->phydev);
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003687
3688 netif_start_queue(dev);
3689
3690 return 0;
3691}
3692
3693/* Close the interface */
3694static int at91ether_close(struct net_device *dev)
3695{
3696 struct macb *lp = netdev_priv(dev);
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003697 struct macb_queue *q = &lp->queues[0];
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003698 u32 ctl;
3699
3700 /* Disable Receiver and Transmitter */
3701 ctl = macb_readl(lp, NCR);
3702 macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
3703
3704 /* Disable MAC interrupts */
3705 macb_writel(lp, IDR, MACB_BIT(RCOMP) |
3706 MACB_BIT(RXUBR) |
3707 MACB_BIT(ISR_TUND) |
3708 MACB_BIT(ISR_RLE) |
3709 MACB_BIT(TCOMP) |
3710 MACB_BIT(ISR_ROVR) |
3711 MACB_BIT(HRESP));
3712
3713 netif_stop_queue(dev);
3714
3715 dma_free_coherent(&lp->pdev->dev,
3716 AT91ETHER_MAX_RX_DESCR *
Rafal Ozieblodc97a892017-01-27 15:08:20 +00003717 macb_dma_desc_get_size(lp),
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003718 q->rx_ring, q->rx_ring_dma);
3719 q->rx_ring = NULL;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003720
3721 dma_free_coherent(&lp->pdev->dev,
3722 AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003723 q->rx_buffers, q->rx_buffers_dma);
3724 q->rx_buffers = NULL;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003725
3726 return 0;
3727}
3728
3729/* Transmit packet */
Claudiu Beznead1c38952018-08-07 12:25:12 +03003730static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb,
3731 struct net_device *dev)
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003732{
3733 struct macb *lp = netdev_priv(dev);
3734
3735 if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
3736 netif_stop_queue(dev);
3737
3738 /* Store packet information (to free when Tx completed) */
3739 lp->skb = skb;
3740 lp->skb_length = skb->len;
Christoph Hellwig564923e2019-02-11 14:19:59 +01003741 lp->skb_physaddr = dma_map_single(&lp->pdev->dev, skb->data,
3742 skb->len, DMA_TO_DEVICE);
3743 if (dma_mapping_error(&lp->pdev->dev, lp->skb_physaddr)) {
Alexey Khoroshilov178c7ae2016-11-19 01:40:10 +03003744 dev_kfree_skb_any(skb);
3745 dev->stats.tx_dropped++;
3746 netdev_err(dev, "%s: DMA mapping error\n", __func__);
3747 return NETDEV_TX_OK;
3748 }
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003749
3750 /* Set address of the data in the Transmit Address register */
3751 macb_writel(lp, TAR, lp->skb_physaddr);
3752 /* Set length of the packet in the Transmit Control register */
3753 macb_writel(lp, TCR, skb->len);
3754
3755 } else {
3756 netdev_err(dev, "%s called, but device is busy!\n", __func__);
3757 return NETDEV_TX_BUSY;
3758 }
3759
3760 return NETDEV_TX_OK;
3761}
3762
 3763/* Extract received frames from the buffer descriptors and send them to the upper layers.
3764 * (Called from interrupt context)
3765 */
3766static void at91ether_rx(struct net_device *dev)
3767{
3768 struct macb *lp = netdev_priv(dev);
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003769 struct macb_queue *q = &lp->queues[0];
Rafal Ozieblodc97a892017-01-27 15:08:20 +00003770 struct macb_dma_desc *desc;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003771 unsigned char *p_recv;
3772 struct sk_buff *skb;
3773 unsigned int pktlen;
3774
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003775 desc = macb_rx_desc(q, q->rx_tail);
Rafal Ozieblodc97a892017-01-27 15:08:20 +00003776 while (desc->addr & MACB_BIT(RX_USED)) {
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003777 p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
Rafal Ozieblodc97a892017-01-27 15:08:20 +00003778 pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
		skb = netdev_alloc_skb(dev, pktlen + 2);
		if (skb) {
			skb_reserve(skb, 2);
			skb_put_data(skb, p_recv, pktlen);

			skb->protocol = eth_type_trans(skb, dev);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pktlen;
			netif_rx(skb);
		} else {
			dev->stats.rx_dropped++;
		}

		if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
			dev->stats.multicast++;

		/* reset ownership bit */
		desc->addr &= ~MACB_BIT(RX_USED);

		/* wrap after last buffer */
		if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
			q->rx_tail = 0;
		else
			q->rx_tail++;

		desc = macb_rx_desc(q, q->rx_tail);
	}
}

/* MAC interrupt handler */
static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct macb *lp = netdev_priv(dev);
	u32 intstatus, ctl;

	/* MAC Interrupt Status register indicates what interrupts are pending.
	 * It is automatically cleared once read.
	 */
	intstatus = macb_readl(lp, ISR);

	/* Receive complete */
	if (intstatus & MACB_BIT(RCOMP))
		at91ether_rx(dev);

	/* Transmit complete */
	if (intstatus & MACB_BIT(TCOMP)) {
		/* The TCOM bit is set even if the transmission failed */
		if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
			dev->stats.tx_errors++;

		if (lp->skb) {
			dev_consume_skb_irq(lp->skb);
			lp->skb = NULL;
			dma_unmap_single(&lp->pdev->dev, lp->skb_physaddr,
					 lp->skb_length, DMA_TO_DEVICE);
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += lp->skb_length;
		}
		netif_wake_queue(dev);
	}

	/* Work-around for EMAC Errata section 41.3.1 */
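	/* RXUBR is raised when the controller reads a descriptor whose used
	 * bit is still set; toggling the receiver restarts reception, and
	 * the wmb() keeps the disable write ordered before the re-enable.
	 */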
	if (intstatus & MACB_BIT(RXUBR)) {
		ctl = macb_readl(lp, NCR);
		macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
		wmb();
		macb_writel(lp, NCR, ctl | MACB_BIT(RE));
	}

	if (intstatus & MACB_BIT(ISR_ROVR))
		netdev_err(dev, "ROVR error\n");

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void at91ether_poll_controller(struct net_device *dev)
{
	unsigned long flags;

	local_irq_save(flags);
	at91ether_interrupt(dev->irq, dev);
	local_irq_restore(flags);
}
#endif

static const struct net_device_ops at91ether_netdev_ops = {
	.ndo_open		= at91ether_open,
	.ndo_stop		= at91ether_close,
	.ndo_start_xmit		= at91ether_start_xmit,
	.ndo_get_stats		= macb_get_stats,
	.ndo_set_rx_mode	= macb_set_rx_mode,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_do_ioctl		= macb_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= at91ether_poll_controller,
#endif
};

static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
			      struct clk **hclk, struct clk **tx_clk,
			      struct clk **rx_clk, struct clk **tsu_clk)
{
	int err;

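	/* The RM9200 EMAC needs only its peripheral clock; the other handles
	 * stay NULL, which clk_disable_unprepare() in the common teardown
	 * paths treats as a no-op.
	 */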
	*hclk = NULL;
	*tx_clk = NULL;
	*rx_clk = NULL;
	*tsu_clk = NULL;

	*pclk = devm_clk_get(&pdev->dev, "ether_clk");
	if (IS_ERR(*pclk))
		return PTR_ERR(*pclk);

	err = clk_prepare_enable(*pclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
		return err;
	}

	return 0;
}

static int at91ether_init(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(dev);
	int err;
	u32 reg;

	bp->queues[0].bp = bp;

	dev->netdev_ops = &at91ether_netdev_ops;
	dev->ethtool_ops = &macb_ethtool_ops;

	err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
			       0, dev->name, dev);
	if (err)
		return err;

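	/* Quiesce the MAC: clearing NCR disables the receiver and the
	 * transmitter before NCFGR is programmed.
	 */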
	macb_writel(bp, NCR, 0);

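	/* Run the MDC clock at MCK/32; BIG allows reception of oversized
	 * frames. RMII mode is selected when the PHY interface asks for it.
	 */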
	reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
	if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
		reg |= MACB_BIT(RM9200_RMII);

	macb_writel(bp, NCFGR, reg);

	return 0;
}

static const struct macb_config at91sam9260_config = {
	.caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d3macb_config = {
	.caps = MACB_CAPS_SG_DISABLED
	      | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config pc302gem_config = {
	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d2_config = {
	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d3_config = {
	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
	      | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.jumbo_max_len = 10240,
};

static const struct macb_config sama5d4_config = {
	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.dma_burst_length = 4,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config emac_config = {
	.caps = MACB_CAPS_NEEDS_RSTONUBR,
	.clk_init = at91ether_clk_init,
	.init = at91ether_init,
};

static const struct macb_config np4_config = {
	.caps = MACB_CAPS_USRIO_DISABLED,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config zynqmp_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
		MACB_CAPS_JUMBO |
		MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.jumbo_max_len = 10240,
};

static const struct macb_config zynq_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF |
		MACB_CAPS_NEEDS_RSTONUBR,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

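/* Map each DT compatible string to the per-SoC configuration above
 * (capability flags plus the clock-init and init hooks used by probe).
 */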
static const struct of_device_id macb_dt_ids[] = {
	{ .compatible = "cdns,at32ap7000-macb" },
	{ .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
	{ .compatible = "cdns,macb" },
	{ .compatible = "cdns,np4-macb", .data = &np4_config },
	{ .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
	{ .compatible = "cdns,gem", .data = &pc302gem_config },
	{ .compatible = "cdns,sam9x60-macb", .data = &at91sam9260_config },
	{ .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
	{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
	{ .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
	{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
	{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
	{ .compatible = "cdns,emac", .data = &emac_config },
	{ .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
	{ .compatible = "cdns,zynq-gem", .data = &zynq_config },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, macb_dt_ids);
#endif /* CONFIG_OF */

static const struct macb_config default_gem_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
		MACB_CAPS_JUMBO |
		MACB_CAPS_GEM_HAS_PTP,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.jumbo_max_len = 10240,
};

static int macb_probe(struct platform_device *pdev)
{
	const struct macb_config *macb_config = &default_gem_config;
	int (*clk_init)(struct platform_device *, struct clk **,
			struct clk **, struct clk **, struct clk **,
			struct clk **) = macb_config->clk_init;
	int (*init)(struct platform_device *) = macb_config->init;
	struct device_node *np = pdev->dev.of_node;
	struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
	struct clk *tsu_clk = NULL;
	unsigned int queue_mask, num_queues;
	struct macb_platform_data *pdata;
	bool native_io;
	struct phy_device *phydev;
	struct net_device *dev;
	struct resource *regs;
	void __iomem *mem;
	const char *mac;
	struct macb *bp;
	int err, val;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mem = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	if (np) {
		const struct of_device_id *match;

		match = of_match_node(macb_dt_ids, np);
		if (match && match->data) {
			macb_config = match->data;
			clk_init = macb_config->clk_init;
			init = macb_config->init;
		}
	}

	err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk, &tsu_clk);
	if (err)
		return err;

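	/* Hold the device active for the rest of probe; the matching
	 * pm_runtime_put_autosuspend() near the end drops this reference
	 * once setup has succeeded.
	 */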
	pm_runtime_set_autosuspend_delay(&pdev->dev, MACB_PM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	native_io = hw_is_native_io(mem);

	macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
	dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
	if (!dev) {
		err = -ENOMEM;
		goto err_disable_clocks;
	}

	dev->base_addr = regs->start;

	SET_NETDEV_DEV(dev, &pdev->dev);

	bp = netdev_priv(dev);
	bp->pdev = pdev;
	bp->dev = dev;
	bp->regs = mem;
	bp->native_io = native_io;
	if (native_io) {
		bp->macb_reg_readl = hw_readl_native;
		bp->macb_reg_writel = hw_writel_native;
	} else {
		bp->macb_reg_readl = hw_readl;
		bp->macb_reg_writel = hw_writel;
	}
	bp->num_queues = num_queues;
	bp->queue_mask = queue_mask;
	if (macb_config)
		bp->dma_burst_length = macb_config->dma_burst_length;
	bp->pclk = pclk;
	bp->hclk = hclk;
	bp->tx_clk = tx_clk;
	bp->rx_clk = rx_clk;
	bp->tsu_clk = tsu_clk;
	if (macb_config)
		bp->jumbo_max_len = macb_config->jumbo_max_len;

	bp->wol = 0;
	if (of_get_property(np, "magic-packet", NULL))
		bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
	device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);

	spin_lock_init(&bp->lock);

	/* setup capabilities */
	macb_configure_caps(bp, macb_config);

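	/* GEM designs synthesized with 64-bit addressing advertise it in
	 * DCFG6.DAW64; in that case widen the DMA mask to 44 bits and flag
	 * the extended (64-bit) descriptor layout.
	 */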
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
		dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
		bp->hw_dma_cap |= HW_DMA_CAP_64B;
	}
#endif
	platform_set_drvdata(pdev, dev);

	dev->irq = platform_get_irq(pdev, 0);
	if (dev->irq < 0) {
		err = dev->irq;
		goto err_out_free_netdev;
	}

	/* MTU range: 68 - 1500 or 10240 */
	dev->min_mtu = GEM_MTU_MIN_SIZE;
	if (bp->caps & MACB_CAPS_JUMBO)
		dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
	else
		dev->max_mtu = ETH_DATA_LEN;

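	/* DCFG10 encodes how many descriptors the DMA reads ahead as a power
	 * of two; convert that to bytes so the descriptor rings can be
	 * over-allocated accordingly.
	 */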
	if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) {
		val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10));
		if (val)
			bp->rx_bd_rd_prefetch = (2 << (val - 1)) *
						macb_dma_desc_get_size(bp);

		val = GEM_BFEXT(TXBD_RDBUFF, gem_readl(bp, DCFG10));
		if (val)
			bp->tx_bd_rd_prefetch = (2 << (val - 1)) *
						macb_dma_desc_get_size(bp);
	}

	bp->rx_intr_mask = MACB_RX_INT_FLAGS;
	if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR)
		bp->rx_intr_mask |= MACB_BIT(RXUBR);

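	/* MAC address lookup order: DT "mac-address" property, then an nvmem
	 * cell, and finally whatever address is already programmed into the
	 * controller registers.
	 */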
	mac = of_get_mac_address(np);
	if (mac) {
		ether_addr_copy(bp->dev->dev_addr, mac);
	} else {
		err = nvmem_get_mac_address(&pdev->dev, bp->dev->dev_addr);
		if (err) {
			if (err == -EPROBE_DEFER)
				goto err_out_free_netdev;
			macb_get_hwaddr(bp);
		}
	}

	err = of_get_phy_mode(np);
	if (err < 0) {
		pdata = dev_get_platdata(&pdev->dev);
		if (pdata && pdata->is_rmii)
			bp->phy_interface = PHY_INTERFACE_MODE_RMII;
		else
			bp->phy_interface = PHY_INTERFACE_MODE_MII;
	} else {
		bp->phy_interface = err;
	}

	/* IP specific init */
	err = init(pdev);
	if (err)
		goto err_out_free_netdev;

	err = macb_mii_init(bp);
	if (err)
		goto err_out_free_netdev;

	phydev = dev->phydev;

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_unregister_mdio;
	}

	tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task,
		     (unsigned long)bp);

	phy_attached_info(phydev);

	netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
		    macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
		    dev->base_addr, dev->irq, dev->dev_addr);

	pm_runtime_mark_last_busy(&bp->pdev->dev);
	pm_runtime_put_autosuspend(&bp->pdev->dev);

	return 0;

err_out_unregister_mdio:
	phy_disconnect(dev->phydev);
	mdiobus_unregister(bp->mii_bus);
	of_node_put(bp->phy_node);
	if (np && of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	mdiobus_free(bp->mii_bus);

err_out_free_netdev:
	free_netdev(dev);

err_disable_clocks:
	clk_disable_unprepare(tx_clk);
	clk_disable_unprepare(hclk);
	clk_disable_unprepare(pclk);
	clk_disable_unprepare(rx_clk);
	clk_disable_unprepare(tsu_clk);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);

	return err;
}

static int macb_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct macb *bp;
	struct device_node *np = pdev->dev.of_node;

	dev = platform_get_drvdata(pdev);

	if (dev) {
		bp = netdev_priv(dev);
		if (dev->phydev)
			phy_disconnect(dev->phydev);
		mdiobus_unregister(bp->mii_bus);
		if (np && of_phy_is_fixed_link(np))
			of_phy_deregister_fixed_link(np);
		dev->phydev = NULL;
		mdiobus_free(bp->mii_bus);

		unregister_netdev(dev);
		pm_runtime_disable(&pdev->dev);
		pm_runtime_dont_use_autosuspend(&pdev->dev);
		if (!pm_runtime_suspended(&pdev->dev)) {
			clk_disable_unprepare(bp->tx_clk);
			clk_disable_unprepare(bp->hclk);
			clk_disable_unprepare(bp->pclk);
			clk_disable_unprepare(bp->rx_clk);
			clk_disable_unprepare(bp->tsu_clk);
			pm_runtime_set_suspended(&pdev->dev);
		}
		of_node_put(bp->phy_node);
		free_netdev(dev);
	}

	return 0;
}

static int __maybe_unused macb_suspend(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);
	struct macb_queue *queue = bp->queues;
	unsigned long flags;
	unsigned int q;

	if (!netif_running(netdev))
		return 0;

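	/* With magic-packet WoL armed, only the WOL interrupt is left live
	 * so the MAC can wake the system; otherwise NAPI, the PHY and the
	 * MAC itself are all quiesced before runtime suspend gates the
	 * clocks.
	 */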
	if (bp->wol & MACB_WOL_ENABLED) {
		macb_writel(bp, IER, MACB_BIT(WOL));
		macb_writel(bp, WOL, MACB_BIT(MAG));
		enable_irq_wake(bp->queues[0].irq);
		netif_device_detach(netdev);
	} else {
		netif_device_detach(netdev);
		for (q = 0, queue = bp->queues; q < bp->num_queues;
		     ++q, ++queue)
			napi_disable(&queue->napi);
		phy_stop(netdev->phydev);
		phy_suspend(netdev->phydev);
		spin_lock_irqsave(&bp->lock, flags);
		macb_reset_hw(bp);
		spin_unlock_irqrestore(&bp->lock, flags);
	}

	netif_carrier_off(netdev);
	if (bp->ptp_info)
		bp->ptp_info->ptp_remove(netdev);
	pm_runtime_force_suspend(dev);

	return 0;
}

static int __maybe_unused macb_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);
	struct macb_queue *queue = bp->queues;
	unsigned int q;

	if (!netif_running(netdev))
		return 0;

	pm_runtime_force_resume(dev);

	if (bp->wol & MACB_WOL_ENABLED) {
		macb_writel(bp, IDR, MACB_BIT(WOL));
		macb_writel(bp, WOL, 0);
		disable_irq_wake(bp->queues[0].irq);
	} else {
		macb_writel(bp, NCR, MACB_BIT(MPE));
		for (q = 0, queue = bp->queues; q < bp->num_queues;
		     ++q, ++queue)
			napi_enable(&queue->napi);
		phy_resume(netdev->phydev);
		phy_init_hw(netdev->phydev);
		phy_start(netdev->phydev);
	}

	bp->macbgem_ops.mog_init_rings(bp);
	macb_init_hw(bp);
	macb_set_rx_mode(netdev);
	netif_device_attach(netdev);
	if (bp->ptp_info)
		bp->ptp_info->ptp_init(netdev);

	return 0;
}

static int __maybe_unused macb_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(netdev);

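	/* Gate the bus and data-path clocks unless the device may wake the
	 * system; the TSU clock is stopped unconditionally.
	 */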
	if (!(device_may_wakeup(&bp->dev->dev))) {
		clk_disable_unprepare(bp->tx_clk);
		clk_disable_unprepare(bp->hclk);
		clk_disable_unprepare(bp->pclk);
		clk_disable_unprepare(bp->rx_clk);
	}
	clk_disable_unprepare(bp->tsu_clk);

	return 0;
}

static int __maybe_unused macb_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(netdev);

	if (!(device_may_wakeup(&bp->dev->dev))) {
		clk_prepare_enable(bp->pclk);
		clk_prepare_enable(bp->hclk);
		clk_prepare_enable(bp->tx_clk);
		clk_prepare_enable(bp->rx_clk);
	}
	clk_prepare_enable(bp->tsu_clk);

	return 0;
}

static const struct dev_pm_ops macb_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(macb_suspend, macb_resume)
	SET_RUNTIME_PM_OPS(macb_runtime_suspend, macb_runtime_resume, NULL)
};

static struct platform_driver macb_driver = {
	.probe		= macb_probe,
	.remove		= macb_remove,
	.driver		= {
		.name		= "macb",
		.of_match_table	= of_match_ptr(macb_dt_ids),
		.pm		= &macb_pm_ops,
	},
};

module_platform_driver(macb_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_ALIAS("platform:macb");