// SPDX-License-Identifier: GPL-2.0-or-later
/* drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or an amount of time has passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
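
/* Illustrative sketch of the receive loop described above (pseudo-code,
 * not compilable; the driver's actual logic is in gfar_clean_rx_ring()):
 *
 *	while (budget-- && !(bdp->lstatus has RXBD_EMPTY set)) {
 *		pass the filled skb up the stack;
 *		attach a freshly allocated buffer to bdp->bufPtr;
 *		mark the BD empty again (RXBD_EMPTY, plus RXBD_WRAP on
 *		the last descriptor of the ring);
 *		advance bdp, returning to the ring base after the wrap BD;
 *	}
 */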

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DEBUG

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#ifdef CONFIG_PPC
#include <asm/reg.h>
#include <asm/mpc85xx.h>
#endif
#include <asm/irq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>

#include "gianfar.h"

#define TX_TIMEOUT (5*HZ)

const char gfar_driver_version[] = "2.0";

static int gfar_enet_open(struct net_device *dev);
static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
				int alloc_cnt);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static noinline void gfar_update_link_state(struct gfar_private *priv);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll_rx(struct napi_struct *napi, int budget);
static int gfar_poll_tx(struct napi_struct *napi, int budget);
static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb);
static void gfar_halt_nodisable(struct gfar_private *priv);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

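/* Hand an Rx buffer descriptor back to the hardware: point it at @buf,
 * mark it empty (with interrupt-on-fill), and set the wrap bit if it is
 * the last BD of the ring. gfar_wmb() orders the bufPtr update before the
 * descriptor is marked empty, so DMA never sees a half-updated BD.
 */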
static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = cpu_to_be32(buf);

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	gfar_wmb();

	bdp->lstatus = cpu_to_be32(lstatus);
}

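/* Reset every Tx and Rx ring to its initial state: all Tx BDs cleared
 * (wrap bit kept on the last one), all Rx BDs refilled with fresh buffers,
 * and each Rx queue bound to its free-buffer pointer register (rfbptr),
 * used by the flow control logic set up in gfar_mac_rx_config().
 */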
static void gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	u32 __iomem *rfbptr;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
					    TXBD_WRAP);
	}

	rfbptr = &regs->rfbptr0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];

		rx_queue->next_to_clean = 0;
		rx_queue->next_to_use = 0;
		rx_queue->next_to_alloc = 0;

		/* make sure next_to_clean != next_to_use after this
		 * by leaving at least 1 unused descriptor
		 */
		gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));

		rx_queue->rfbptr = rfbptr;
		rfbptr += 2;
	}
}

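/* Allocate one coherent DMA region holding all Tx rings followed by all
 * Rx rings, carve it up between the queues, allocate the per-queue
 * skb/buffer bookkeeping arrays, and initialize the rings via
 * gfar_init_bds(). Undoes any partial allocation on failure.
 */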
static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = priv->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
				   (priv->total_tx_ring_size *
				    sizeof(struct txbd8)) +
				   (priv->total_rx_ring_size *
				    sizeof(struct rxbd8)),
				   &addr, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->ndev = ndev;
		rx_queue->dev = dev;
		addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff =
			kmalloc_array(tx_queue->tx_ring_size,
				      sizeof(*tx_queue->tx_skbuff),
				      GFP_KERNEL);
		if (!tx_queue->tx_skbuff)
			goto cleanup;

		for (j = 0; j < tx_queue->tx_ring_size; j++)
			tx_queue->tx_skbuff[j] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
					    sizeof(*rx_queue->rx_buff),
					    GFP_KERNEL);
		if (!rx_queue->rx_buff)
			goto cleanup;
	}

	gfar_init_bds(ndev);

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}

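/* Tell the controller where the rings live: write each queue's BD ring
 * DMA address into the tbase0../rbase0.. registers. Consecutive queue
 * registers sit two words apart in the register map, hence the "+= 2".
 */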
static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}

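/* Program the per-queue Rx parameter registers (rqprm0..): ring size plus
 * the default lossless flow control threshold in the upper bit field.
 */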
static void gfar_init_rqprm(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->rqprm0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
			   (DEFAULT_RX_LFC_THR << FBTHR_SHIFT));
		baddr++;
	}
}

static void gfar_rx_offload_en(struct gfar_private *priv)
{
	/* set this when rx hw offload (TOE) functions are being used */
	priv->uses_rxfcb = 0;

	if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
		priv->uses_rxfcb = 1;

	if (priv->hwts_rx_en || priv->rx_filer_enable)
		priv->uses_rxfcb = 1;
}

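/* Rebuild RCTRL from the current configuration: filer-based queue
 * distribution, promiscuous mode, Rx checksum offload, extended hashing,
 * padding, hardware Rx timestamping and VLAN extraction, plus the flow
 * control thresholds programmed through gfar_init_rqprm().
 */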
static void gfar_mac_rx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
		/* Program the RIR0 reg with the required distribution */
		if (priv->poll_mode == GFAR_SQ_POLLING)
			gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
		else /* GFAR_MQ_POLLING */
			gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
	}

	/* Restore PROMISC mode */
	if (priv->ndev->flags & IFF_PROMISC)
		rctrl |= RCTRL_PROM;

	if (priv->ndev->features & NETIF_F_RXCSUM)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash)
		rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en)
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;

	/* Clear the LFC bit */
	gfar_write(&regs->rctrl, rctrl);
	/* Init flow control threshold values */
	gfar_init_rqprm(priv);
	gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL);
	rctrl |= RCTRL_LFC;

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);
}

static void gfar_mac_tx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tctrl = 0;

	if (priv->ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	if (priv->prio_sched_en)
		tctrl |= TCTRL_TXSCHED_PRIO;
	else {
		tctrl |= TCTRL_TXSCHED_WRRS;
		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
	}

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
		tctrl |= TCTRL_VLINS;

	gfar_write(&regs->tctrl, tctrl);
}

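/* Apply interrupt coalescing settings for the queues selected by @tx_mask
 * and @rx_mask. Multi-group (MQ_MG_MODE) devices have per-queue txic/rxic
 * registers; older single-group devices expose only one register pair, so
 * queue 0's settings are used for the whole device.
 */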
static void gfar_configure_coalescing(struct gfar_private *priv,
				      unsigned long tx_mask, unsigned long rx_mask)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;

	if (priv->mode == MQ_MG_MODE) {
		int i = 0;

		baddr = &regs->txic0;
		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->tx_queue[i]->txcoalescing))
				gfar_write(baddr + i, priv->tx_queue[i]->txic);
		}

		baddr = &regs->rxic0;
		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->rx_queue[i]->rxcoalescing))
				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
		}
	} else {
		/* Backward compatible case -- even if we enable
		 * multiple queues, there's only single reg to program
		 */
		gfar_write(&regs->txic, 0);
		if (likely(priv->tx_queue[0]->txcoalescing))
			gfar_write(&regs->txic, priv->tx_queue[0]->txic);

		gfar_write(&regs->rxic, 0);
		if (unlikely(priv->rx_queue[0]->rxcoalescing))
			gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
	}
}

void gfar_configure_coalescing_all(struct gfar_private *priv)
{
	gfar_configure_coalescing(priv, 0xFF, 0xFF);
}

static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
		tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}

	dev->stats.tx_bytes = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}

static int gfar_set_mac_addr(struct net_device *dev, void *p)
{
	eth_mac_addr(dev, p);

	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}

static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_features = gfar_set_features,
	.ndo_set_rx_mode = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_get_stats = gfar_get_stats,
	.ndo_change_carrier = fixed_phy_change_carrier,
	.ndo_set_mac_address = gfar_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};

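/* Silence the controller on every interrupt group: ack any pending events
 * in IEVENT and mask everything in IMASK. gfar_ints_enable() below undoes
 * this by restoring the default interrupt mask.
 */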
static void gfar_ints_disable(struct gfar_private *priv)
{
	int i;
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;
		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}
}

static void gfar_ints_enable(struct gfar_private *priv)
{
	int i;
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;
		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask, IMASK_DEFAULT);
	}
}

static int gfar_alloc_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
					    GFP_KERNEL);
		if (!priv->tx_queue[i])
			return -ENOMEM;

		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = priv->ndev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}
	return 0;
}

static int gfar_alloc_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
					    GFP_KERNEL);
		if (!priv->rx_queue[i])
			return -ENOMEM;

		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->ndev = priv->ndev;
	}
	return 0;
}

static void gfar_free_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void gfar_free_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void free_gfar_dev(struct gfar_private *priv)
{
	int i, j;

	for (i = 0; i < priv->num_grps; i++)
		for (j = 0; j < GFAR_NUM_IRQS; j++) {
			kfree(priv->gfargrp[i].irqinfo[j]);
			priv->gfargrp[i].irqinfo[j] = NULL;
		}

	free_netdev(priv->ndev);
}

static void disable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_disable(&priv->gfargrp[i].napi_rx);
		napi_disable(&priv->gfargrp[i].napi_tx);
	}
}

static void enable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_enable(&priv->gfargrp[i].napi_rx);
		napi_enable(&priv->gfargrp[i].napi_tx);
	}
}

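/* Set up one interrupt group from its device tree node (a "queue-group"
 * child node on etsec2, otherwise the controller node itself): map the
 * group's registers, fetch its IRQs (TX only on FEC, TX/RX/ER elsewhere),
 * and assign Rx/Tx queues to the group from the fsl,rx/tx-bit-map
 * properties (or the defaults).
 */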
static int gfar_parse_group(struct device_node *np,
			    struct gfar_private *priv, const char *model)
{
	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
	int i;

	for (i = 0; i < GFAR_NUM_IRQS; i++) {
		grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
					  GFP_KERNEL);
		if (!grp->irqinfo[i])
			return -ENOMEM;
	}

	grp->regs = of_iomap(np, 0);
	if (!grp->regs)
		return -ENOMEM;

	gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
		if (!gfar_irq(grp, TX)->irq ||
		    !gfar_irq(grp, RX)->irq ||
		    !gfar_irq(grp, ER)->irq)
			return -EINVAL;
	}

	grp->priv = priv;
	spin_lock_init(&grp->grplock);
	if (priv->mode == MQ_MG_MODE) {
		u32 rxq_mask, txq_mask;
		int ret;

		grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
		grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);

		ret = of_property_read_u32(np, "fsl,rx-bit-map", &rxq_mask);
		if (!ret) {
			grp->rx_bit_map = rxq_mask ?
				rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
		}

		ret = of_property_read_u32(np, "fsl,tx-bit-map", &txq_mask);
		if (!ret) {
			grp->tx_bit_map = txq_mask ?
				txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
		}

		if (priv->poll_mode == GFAR_SQ_POLLING) {
			/* One Q per interrupt group: Q0 to G0, Q1 to G1 */
			grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
			grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
		}
	} else {
		grp->rx_bit_map = 0xFF;
		grp->tx_bit_map = 0xFF;
	}

	/* bit_map's MSB is q0 (from q0 to q7) but, for_each_set_bit parses
	 * right to left, so we need to reverse the 8 bits to get the q index
	 */
	grp->rx_bit_map = bitrev8(grp->rx_bit_map);
	grp->tx_bit_map = bitrev8(grp->tx_bit_map);

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups
	 */
	for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
		if (!grp->rx_queue)
			grp->rx_queue = priv->rx_queue[i];
		grp->num_rx_queues++;
		grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
		priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		priv->rx_queue[i]->grp = grp;
	}

	for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
		if (!grp->tx_queue)
			grp->tx_queue = priv->tx_queue[i];
		grp->num_tx_queues++;
		grp->tstat |= (TSTAT_CLEAR_THALT >> i);
		priv->tqueue |= (TQUEUE_EN0 >> i);
		priv->tx_queue[i]->grp = grp;
	}

	priv->num_grps++;

	return 0;
}

static int gfar_of_group_count(struct device_node *np)
{
	struct device_node *child;
	int num = 0;

	for_each_available_child_of_node(np, child)
		if (of_node_name_eq(child, "queue-group"))
			num++;

	return num;
}

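/* Probe-time device tree parsing: pick the operating mode (etsec2
 * multi-group vs. single group) and the number of queues, allocate the
 * net_device and queue structures, parse each interrupt group, and read
 * the MAC address, stashing, PHY and wake-up related properties.
 */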
static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	u32 stash_len = 0;
	u32 stash_idx = 0;
	unsigned int num_tx_qs, num_rx_qs;
	unsigned short mode, poll_mode;

	if (!np)
		return -ENODEV;

	if (of_device_is_compatible(np, "fsl,etsec2")) {
		mode = MQ_MG_MODE;
		poll_mode = GFAR_SQ_POLLING;
	} else {
		mode = SQ_SG_MODE;
		poll_mode = GFAR_SQ_POLLING;
	}

	if (mode == SQ_SG_MODE) {
		num_tx_qs = 1;
		num_rx_qs = 1;
	} else { /* MQ_MG_MODE */
		/* get the actual number of supported groups */
		unsigned int num_grps = gfar_of_group_count(np);

		if (num_grps == 0 || num_grps > MAXGROUPS) {
			dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
				num_grps);
			pr_err("Cannot do alloc_etherdev, aborting\n");
			return -EINVAL;
		}

		if (poll_mode == GFAR_SQ_POLLING) {
			num_tx_qs = num_grps; /* one txq per int group */
			num_rx_qs = num_grps; /* one rxq per int group */
		} else { /* GFAR_MQ_POLLING */
			u32 tx_queues, rx_queues;
			int ret;

			/* parse the num of HW tx and rx queues */
			ret = of_property_read_u32(np, "fsl,num_tx_queues",
						   &tx_queues);
			num_tx_qs = ret ? 1 : tx_queues;

			ret = of_property_read_u32(np, "fsl,num_rx_queues",
						   &rx_queues);
			num_rx_qs = ret ? 1 : rx_queues;
		}
	}

	if (num_tx_qs > MAX_TX_QS) {
		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
		       num_tx_qs, MAX_TX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	if (num_rx_qs > MAX_RX_QS) {
		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
		       num_rx_qs, MAX_RX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->ndev = dev;

	priv->mode = mode;
	priv->poll_mode = poll_mode;

	priv->num_tx_queues = num_tx_qs;
	netif_set_real_num_rx_queues(dev, num_rx_qs);
	priv->num_rx_queues = num_rx_qs;

	err = gfar_alloc_tx_queues(priv);
	if (err)
		goto tx_alloc_failed;

	err = gfar_alloc_rx_queues(priv);
	if (err)
		goto rx_alloc_failed;

	err = of_property_read_string(np, "model", &model);
	if (err) {
		pr_err("Device model property missing, aborting\n");
		goto rx_alloc_failed;
	}

	/* Init Rx queue filer rule set linked list */
	INIT_LIST_HEAD(&priv->rx_list.list);
	priv->rx_list.count = 0;
	mutex_init(&priv->rx_queue_access);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (priv->mode == MQ_MG_MODE) {
		for_each_available_child_of_node(np, child) {
			if (!of_node_name_eq(child, "queue-group"))
				continue;

			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else { /* SQ_SG_MODE */
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	if (of_property_read_bool(np, "bd-stash")) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	err = of_property_read_u32(np, "rx-stash-len", &stash_len);

	if (err == 0)
		priv->rx_stash_size = stash_len;

	err = of_property_read_u32(np, "rx-stash-idx", &stash_idx);

	if (err == 0)
		priv->rx_stash_index = stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);

	if (!IS_ERR(mac_addr))
		ether_addr_copy(dev->dev_addr, mac_addr);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				      FSL_GIANFAR_DEV_HAS_COALESCE |
				      FSL_GIANFAR_DEV_HAS_RMON |
				      FSL_GIANFAR_DEV_HAS_MULTI_INTR;

	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				      FSL_GIANFAR_DEV_HAS_COALESCE |
				      FSL_GIANFAR_DEV_HAS_RMON |
				      FSL_GIANFAR_DEV_HAS_MULTI_INTR |
				      FSL_GIANFAR_DEV_HAS_CSUM |
				      FSL_GIANFAR_DEV_HAS_VLAN |
				      FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
				      FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
				      FSL_GIANFAR_DEV_HAS_TIMER |
				      FSL_GIANFAR_DEV_HAS_RX_FILER;

	err = of_property_read_string(np, "phy-connection-type", &ctype);

	/* We only care about rgmii-id. The rest are autodetected */
	if (err == 0 && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_find_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	if (of_get_property(np, "fsl,wake-on-filer", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* In the case of a fixed PHY, the DT node associated
	 * to the PHY is the Ethernet MAC DT node.
	 */
	if (!priv->phy_node && of_phy_is_fixed_link(np)) {
		err = of_phy_register_fixed_link(np);
		if (err)
			goto err_grp_init;

		priv->phy_node = of_node_get(np);
	}

	/* Find the TBI PHY. If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

err_grp_init:
	unmap_group_regs(priv);
rx_alloc_failed:
	gfar_free_rx_queues(priv);
tx_alloc_failed:
	gfar_free_tx_queues(priv);
	free_gfar_dev(priv);
	return err;
}

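/* SIOCSHWTSTAMP handler: validate the hwtstamp_config passed from user
 * space and switch Tx/Rx hardware timestamping on or off. Toggling the Rx
 * side changes the Rx frame layout (see gfar_rx_offload_en()), so it
 * requires a full reset_gfar().
 */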
static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		priv->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		if (priv->hwts_rx_en) {
			priv->hwts_rx_en = 0;
			reset_gfar(netdev);
		}
		break;
	default:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		if (!priv->hwts_rx_en) {
			priv->hwts_rx_en = 1;
			reset_gfar(netdev);
		}
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	config.flags = 0;
	config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	config.rx_filter = (priv->hwts_rx_en ?
			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct phy_device *phydev = dev->phydev;

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCSHWTSTAMP)
		return gfar_hwtstamp_set(dev, rq);
	if (cmd == SIOCGHWTSTAMP)
		return gfar_hwtstamp_get(dev, rq);

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, rq, cmd);
}

static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
				   u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}

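/* Load a default Rx filer rule set: a match-all rule at the top of the
 * table, one four-entry cluster per traffic class (IPv4/IPv6, plain and
 * TCP/UDP) via cluster_entry_per_class(), and NOMATCH entries for every
 * remaining slot.
 */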
static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		priv->ftp_rqfcr[i] = rqfcr;
		priv->ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}

#ifdef CONFIG_PPC
static void __gfar_detect_errata_83xx(struct gfar_private *priv)
{
	unsigned int pvr = mfspr(SPRN_PVR);
	unsigned int svr = mfspr(SPRN_SVR);
	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
	unsigned int rev = svr & 0xffff;

	/* MPC8313 Rev 2.0 and higher; All MPC837x */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_74;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_76;

	/* MPC8313 Rev < 2.0 */
	if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
		priv->errata |= GFAR_ERRATA_12;
}

static void __gfar_detect_errata_85xx(struct gfar_private *priv)
{
	unsigned int svr = mfspr(SPRN_SVR);

	if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
		priv->errata |= GFAR_ERRATA_12;
	/* P2020/P1010 Rev 1; MPC8548 Rev 2 */
	if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
	    ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) ||
	    ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31)))
		priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
}
#endif

static void gfar_detect_errata(struct gfar_private *priv)
{
	struct device *dev = &priv->ofdev->dev;

	/* no plans to fix */
	priv->errata |= GFAR_ERRATA_A002;

#ifdef CONFIG_PPC
	if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
		__gfar_detect_errata_85xx(priv);
	else /* non-mpc85xx parts, i.e. e300 core based */
		__gfar_detect_errata_83xx(priv);
#endif

	if (priv->errata)
		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
			 priv->errata);
}

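/* Soft-reset the MAC and bring it back to a fully programmed state:
 * frame length limits, MACCFG2 (including the eTSEC-74 huge frame
 * workaround), hash and group address registers, Rx/Tx control, station
 * address, multicast filters and interrupt coalescing.
 */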
Claudiu Manoil08511332014-02-24 12:13:45 +02001136void gfar_mac_reset(struct gfar_private *priv)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001137{
Claudiu Manoil20862782014-02-17 12:53:14 +02001138 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Claudiu Manoila328ac92014-02-24 12:13:42 +02001139 u32 tempval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001140
1141 /* Reset MAC layer */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001142 gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001143
Andy Flemingb98ac702009-02-04 16:38:05 -08001144 /* We need to delay at least 3 TX clocks */
Claudiu Manoila328ac92014-02-24 12:13:42 +02001145 udelay(3);
Andy Flemingb98ac702009-02-04 16:38:05 -08001146
Claudiu Manoil23402bd2013-08-12 13:53:26 +03001147 /* the soft reset bit is not self-resetting, so we need to
1148 * clear it before resuming normal operation
1149 */
Claudiu Manoil20862782014-02-17 12:53:14 +02001150 gfar_write(&regs->maccfg1, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001151
Claudiu Manoila328ac92014-02-24 12:13:42 +02001152 udelay(3);
1153
Claudiu Manoil75354142015-07-13 16:22:06 +03001154 gfar_rx_offload_en(priv);
Claudiu Manoil88302642014-02-24 12:13:43 +02001155
1156 /* Initialize the max receive frame/buffer lengths */
Claudiu Manoil75354142015-07-13 16:22:06 +03001157 gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE);
1158 gfar_write(&regs->mrblr, GFAR_RXB_SIZE);
Claudiu Manoila328ac92014-02-24 12:13:42 +02001159
1160 /* Initialize the Minimum Frame Length Register */
1161 gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
1162
Linus Torvalds1da177e2005-04-16 15:20:36 -07001163 /* Initialize MACCFG2. */
Anton Vorontsov7d350972010-06-30 06:39:12 +00001164 tempval = MACCFG2_INIT_SETTINGS;
Claudiu Manoil88302642014-02-24 12:13:43 +02001165
Claudiu Manoil75354142015-07-13 16:22:06 +03001166 /* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1
1167 * are marked as truncated. Avoid this by MACCFG2[Huge Frame]=1,
1168	 * and by checking RxBD[LG] and discarding frames larger than MAXFRM.
Claudiu Manoil88302642014-02-24 12:13:43 +02001169 */
Claudiu Manoil75354142015-07-13 16:22:06 +03001170 if (gfar_has_errata(priv, GFAR_ERRATA_74))
Anton Vorontsov7d350972010-06-30 06:39:12 +00001171 tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
Claudiu Manoil88302642014-02-24 12:13:43 +02001172
Anton Vorontsov7d350972010-06-30 06:39:12 +00001173 gfar_write(&regs->maccfg2, tempval);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001174
Claudiu Manoila328ac92014-02-24 12:13:42 +02001175 /* Clear mac addr hash registers */
1176 gfar_write(&regs->igaddr0, 0);
1177 gfar_write(&regs->igaddr1, 0);
1178 gfar_write(&regs->igaddr2, 0);
1179 gfar_write(&regs->igaddr3, 0);
1180 gfar_write(&regs->igaddr4, 0);
1181 gfar_write(&regs->igaddr5, 0);
1182 gfar_write(&regs->igaddr6, 0);
1183 gfar_write(&regs->igaddr7, 0);
1184
1185 gfar_write(&regs->gaddr0, 0);
1186 gfar_write(&regs->gaddr1, 0);
1187 gfar_write(&regs->gaddr2, 0);
1188 gfar_write(&regs->gaddr3, 0);
1189 gfar_write(&regs->gaddr4, 0);
1190 gfar_write(&regs->gaddr5, 0);
1191 gfar_write(&regs->gaddr6, 0);
1192 gfar_write(&regs->gaddr7, 0);
1193
1194 if (priv->extended_hash)
1195 gfar_clear_exact_match(priv->ndev);
1196
1197 gfar_mac_rx_config(priv);
1198
1199 gfar_mac_tx_config(priv);
1200
1201 gfar_set_mac_address(priv->ndev);
1202
1203 gfar_set_multi(priv->ndev);
1204
1205 /* clear ievent and imask before configuring coalescing */
1206 gfar_ints_disable(priv);
1207
1208 /* Configure the coalescing support */
1209 gfar_configure_coalescing_all(priv);
1210}
1211
1212static void gfar_hw_init(struct gfar_private *priv)
1213{
1214 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1215 u32 attrs;
1216
1217 /* Stop the DMA engine now, in case it was running before
1218 * (The firmware could have used it, and left it running).
1219 */
1220 gfar_halt(priv);
1221
1222 gfar_mac_reset(priv);
1223
1224 /* Zero out the rmon mib registers if it has them */
1225 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
1226 memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
1227
1228 /* Mask off the CAM interrupts */
1229 gfar_write(&regs->rmon.cam1, 0xffffffff);
1230 gfar_write(&regs->rmon.cam2, 0xffffffff);
1231 }
1232
Linus Torvalds1da177e2005-04-16 15:20:36 -07001233 /* Initialize ECNTRL */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001234 gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001235
Claudiu Manoil34018fd2014-02-17 12:53:15 +02001236 /* Set the extraction length and index */
1237 attrs = ATTRELI_EL(priv->rx_stash_size) |
1238 ATTRELI_EI(priv->rx_stash_index);
1239
1240 gfar_write(&regs->attreli, attrs);
1241
1242 /* Start with defaults, and add stashing
1243 * depending on driver parameters
1244 */
1245 attrs = ATTR_INIT_SETTINGS;
1246
1247 if (priv->bd_stash_en)
1248 attrs |= ATTR_BDSTASH;
1249
1250 if (priv->rx_stash_size != 0)
1251 attrs |= ATTR_BUFSTASH;
1252
1253 gfar_write(&regs->attr, attrs);
1254
1255 /* FIFO configs */
1256 gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
1257 gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
1258 gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
1259
Claudiu Manoil20862782014-02-17 12:53:14 +02001260 /* Program the interrupt steering regs, only for MG devices */
1261 if (priv->num_grps > 1)
1262 gfar_write_isrg(priv);
Claudiu Manoil20862782014-02-17 12:53:14 +02001263}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001264
Xiubo Li898157e2014-06-04 16:49:16 +08001265static void gfar_init_addr_hash_table(struct gfar_private *priv)
Claudiu Manoil20862782014-02-17 12:53:14 +02001266{
1267 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001268
Andy Flemingb31a1d82008-12-16 15:29:15 -08001269 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05001270 priv->extended_hash = 1;
1271 priv->hash_width = 9;
1272
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001273 priv->hash_regs[0] = &regs->igaddr0;
1274 priv->hash_regs[1] = &regs->igaddr1;
1275 priv->hash_regs[2] = &regs->igaddr2;
1276 priv->hash_regs[3] = &regs->igaddr3;
1277 priv->hash_regs[4] = &regs->igaddr4;
1278 priv->hash_regs[5] = &regs->igaddr5;
1279 priv->hash_regs[6] = &regs->igaddr6;
1280 priv->hash_regs[7] = &regs->igaddr7;
1281 priv->hash_regs[8] = &regs->gaddr0;
1282 priv->hash_regs[9] = &regs->gaddr1;
1283 priv->hash_regs[10] = &regs->gaddr2;
1284 priv->hash_regs[11] = &regs->gaddr3;
1285 priv->hash_regs[12] = &regs->gaddr4;
1286 priv->hash_regs[13] = &regs->gaddr5;
1287 priv->hash_regs[14] = &regs->gaddr6;
1288 priv->hash_regs[15] = &regs->gaddr7;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001289
1290 } else {
1291 priv->extended_hash = 0;
1292 priv->hash_width = 8;
1293
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001294 priv->hash_regs[0] = &regs->gaddr0;
1295 priv->hash_regs[1] = &regs->gaddr1;
1296 priv->hash_regs[2] = &regs->gaddr2;
1297 priv->hash_regs[3] = &regs->gaddr3;
1298 priv->hash_regs[4] = &regs->gaddr4;
1299 priv->hash_regs[5] = &regs->gaddr5;
1300 priv->hash_regs[6] = &regs->gaddr6;
1301 priv->hash_regs[7] = &regs->gaddr7;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001302 }
Claudiu Manoil20862782014-02-17 12:53:14 +02001303}
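
/* Illustrative sketch, not part of this excerpt: how the hash_regs[] and
 * hash_width state set up above is consumed when a multicast address is
 * added.  This mirrors gfar_set_hash_for_addr() elsewhere in this file;
 * ether_crc() comes from <linux/crc32.h>.  The function name here is
 * hypothetical.
 */
static inline void gfar_hash_addr_sketch(struct gfar_private *priv,
					 const u8 *addr)
{
	u32 result = ether_crc(ETH_ALEN, addr);
	int width = priv->hash_width;			/* 8 or 9, see above */
	u8 whichbit = (result >> (32 - width)) & 0x1f;	/* bit within reg */
	u8 whichreg = result >> (32 - width + 5);	/* one of 8/16 regs */
	u32 value = 1 << (31 - whichbit);		/* MSB-first numbering */

	gfar_write(priv->hash_regs[whichreg],
		   gfar_read(priv->hash_regs[whichreg]) | value);
}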
1304
1305/* Set up the ethernet device structure, private data,
1306 * and anything else we need before we start
1307 */
1308static int gfar_probe(struct platform_device *ofdev)
1309{
Johan Hovold42c70042016-11-28 19:25:02 +01001310 struct device_node *np = ofdev->dev.of_node;
Claudiu Manoil20862782014-02-17 12:53:14 +02001311 struct net_device *dev = NULL;
1312 struct gfar_private *priv = NULL;
1313 int err = 0, i;
1314
1315 err = gfar_of_init(ofdev, &dev);
1316
1317 if (err)
1318 return err;
1319
1320 priv = netdev_priv(dev);
1321 priv->ndev = dev;
1322 priv->ofdev = ofdev;
1323 priv->dev = &ofdev->dev;
1324 SET_NETDEV_DEV(dev, &ofdev->dev);
1325
Claudiu Manoil20862782014-02-17 12:53:14 +02001326 INIT_WORK(&priv->reset_task, gfar_reset_task);
1327
1328 platform_set_drvdata(ofdev, priv);
1329
1330 gfar_detect_errata(priv);
1331
Claudiu Manoil20862782014-02-17 12:53:14 +02001332 /* Set the dev->base_addr to the gfar reg region */
1333 dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
1334
1335 /* Fill in the dev structure */
1336 dev->watchdog_timeo = TX_TIMEOUT;
Jarod Wilson44770e12016-10-17 15:54:17 -04001337 /* MTU range: 50 - 9586 */
Claudiu Manoil20862782014-02-17 12:53:14 +02001338 dev->mtu = 1500;
Jarod Wilson44770e12016-10-17 15:54:17 -04001339 dev->min_mtu = 50;
1340 dev->max_mtu = GFAR_JUMBO_FRAME_SIZE - ETH_HLEN;
Claudiu Manoil20862782014-02-17 12:53:14 +02001341 dev->netdev_ops = &gfar_netdev_ops;
1342 dev->ethtool_ops = &gfar_ethtool_ops;
1343
1344	/* Register Rx and Tx NAPI instances for each interrupt group */
Claudiu Manoil71ff9e32014-03-07 14:42:46 +02001345 for (i = 0; i < priv->num_grps; i++) {
1346 if (priv->poll_mode == GFAR_SQ_POLLING) {
1347 netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
1348 gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
Eric Dumazetd64b5e82015-11-18 06:31:00 -08001349 netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
Claudiu Manoil71ff9e32014-03-07 14:42:46 +02001350 gfar_poll_tx_sq, 2);
1351 } else {
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02001352 netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
1353 gfar_poll_rx, GFAR_DEV_WEIGHT);
Eric Dumazetd64b5e82015-11-18 06:31:00 -08001354 netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02001355 gfar_poll_tx, 2);
1356 }
1357 }
Claudiu Manoil20862782014-02-17 12:53:14 +02001358
1359 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
1360 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
1361 NETIF_F_RXCSUM;
1362 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
1363 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
1364 }
1365
1366 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
1367 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
1368 NETIF_F_HW_VLAN_CTAG_RX;
1369 dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
1370 }
1371
Claudiu Manoil3d23a052015-05-06 18:07:30 +03001372 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1373
Claudiu Manoil20862782014-02-17 12:53:14 +02001374 gfar_init_addr_hash_table(priv);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001375
Zumeng Chen58117672017-12-04 11:22:02 +08001376	/* Insert receive time stamps into the padding alignment bytes, plus
1377	 * 2 bytes of padding to ensure CPU alignment.
1378 */
Claudiu Manoil532c37b2014-02-17 12:53:16 +02001379 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
Zumeng Chen58117672017-12-04 11:22:02 +08001380 priv->padding = 8 + DEFAULT_PADDING;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001381
Manfred Rudigiercc772ab2010-04-08 23:10:03 +00001382 if (dev->features & NETIF_F_IP_CSUM ||
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001383 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
Wu Jiajun-B06378bee9e582012-05-21 23:00:48 +00001384 dev->needed_headroom = GMAC_FCB_LEN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001385
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001386 /* Initializing some of the rx/tx queue level parameters */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001387 for (i = 0; i < priv->num_tx_queues; i++) {
1388 priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
1389 priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
1390 priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
1391 priv->tx_queue[i]->txic = DEFAULT_TXIC;
1392 }
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001393
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001394 for (i = 0; i < priv->num_rx_queues; i++) {
1395 priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
1396 priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
1397 priv->rx_queue[i]->rxic = DEFAULT_RXIC;
1398 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001399
Hamish Martin7bff47d2015-12-15 14:14:50 +13001400 /* Always enable rx filer if available */
1401 priv->rx_filer_enable =
1402 (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001403 /* Enable most messages by default */
1404	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
Claudiu Manoilb98b8ba2012-09-23 22:39:08 +00001405	/* use priority h/w tx queue scheduling for single queue devices */
1406 if (priv->num_tx_queues == 1)
1407 priv->prio_sched_en = 1;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001408
Claudiu Manoil08511332014-02-24 12:13:45 +02001409 set_bit(GFAR_DOWN, &priv->state);
1410
Claudiu Manoila328ac92014-02-24 12:13:42 +02001411 gfar_hw_init(priv);
Trent Piephod3eab822008-10-02 11:12:24 +00001412
Fabio Estevamd4c642e2014-06-03 19:55:38 -03001413 /* Carrier starts down, phylib will bring it up */
1414 netif_carrier_off(dev);
1415
Linus Torvalds1da177e2005-04-16 15:20:36 -07001416 err = register_netdev(dev);
1417
1418 if (err) {
Joe Perches59deab22011-06-14 08:57:47 +00001419 pr_err("%s: Cannot register net device, aborting\n", dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001420 goto register_fail;
1421 }
1422
Claudiu Manoil3e905b82015-10-05 17:19:59 +03001423 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET)
1424 priv->wol_supported |= GFAR_WOL_MAGIC;
1425
1426 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) &&
1427 priv->rx_filer_enable)
1428 priv->wol_supported |= GFAR_WOL_FILER_UCAST;
1429
1430 device_set_wakeup_capable(&ofdev->dev, priv->wol_supported);
Anton Vorontsov2884e5c2009-02-01 00:52:34 -08001431
Dai Harukic50a5d92008-12-17 16:51:32 -08001432 /* fill out IRQ number and name fields */
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001433 for (i = 0; i < priv->num_grps; i++) {
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001434 struct gfar_priv_grp *grp = &priv->gfargrp[i];
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001435 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001436 sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
Joe Perches0015e552012-03-25 07:10:07 +00001437 dev->name, "_g", '0' + i, "_tx");
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001438 sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
Joe Perches0015e552012-03-25 07:10:07 +00001439 dev->name, "_g", '0' + i, "_rx");
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001440 sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
Joe Perches0015e552012-03-25 07:10:07 +00001441 dev->name, "_g", '0' + i, "_er");
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001442 } else
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001443 strcpy(gfar_irq(grp, TX)->name, dev->name);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001444 }
Dai Harukic50a5d92008-12-17 16:51:32 -08001445
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +00001446 /* Initialize the filer table */
1447 gfar_init_filer_table(priv);
1448
Linus Torvalds1da177e2005-04-16 15:20:36 -07001449 /* Print out the device info */
Joe Perches59deab22011-06-14 08:57:47 +00001450 netdev_info(dev, "mac: %pM\n", dev->dev_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001451
Jan Ceuleers0977f812012-06-05 03:42:12 +00001452 /* Even more device info helps when determining which kernel
1453 * provided which set of benchmarks.
1454 */
Joe Perches59deab22011-06-14 08:57:47 +00001455 netdev_info(dev, "Running with NAPI enabled\n");
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001456 for (i = 0; i < priv->num_rx_queues; i++)
Joe Perches59deab22011-06-14 08:57:47 +00001457 netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
1458 i, priv->rx_queue[i]->rx_ring_size);
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001459 for (i = 0; i < priv->num_tx_queues; i++)
Joe Perches59deab22011-06-14 08:57:47 +00001460 netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
1461 i, priv->tx_queue[i]->tx_ring_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001462
1463 return 0;
1464
1465register_fail:
Johan Hovold42c70042016-11-28 19:25:02 +01001466 if (of_phy_is_fixed_link(np))
1467 of_phy_deregister_fixed_link(np);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001468 unmap_group_regs(priv);
Claudiu Manoil20862782014-02-17 12:53:14 +02001469 gfar_free_rx_queues(priv);
1470 gfar_free_tx_queues(priv);
Uwe Kleine-König888c88b2014-08-07 21:20:12 +02001471 of_node_put(priv->phy_node);
1472 of_node_put(priv->tbi_node);
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001473 free_gfar_dev(priv);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001474 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001475}
1476
Grant Likely2dc11582010-08-06 09:25:50 -06001477static int gfar_remove(struct platform_device *ofdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478{
Jingoo Han8513fbd2013-05-23 00:52:31 +00001479 struct gfar_private *priv = platform_get_drvdata(ofdev);
Johan Hovold42c70042016-11-28 19:25:02 +01001480 struct device_node *np = ofdev->dev.of_node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481
Uwe Kleine-König888c88b2014-08-07 21:20:12 +02001482 of_node_put(priv->phy_node);
1483 of_node_put(priv->tbi_node);
Grant Likelyfe192a42009-04-25 12:53:12 +00001484
David S. Millerd9d8e042009-09-06 01:41:02 -07001485 unregister_netdev(priv->ndev);
Johan Hovold42c70042016-11-28 19:25:02 +01001486
1487 if (of_phy_is_fixed_link(np))
1488 of_phy_deregister_fixed_link(np);
1489
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001490 unmap_group_regs(priv);
Claudiu Manoil20862782014-02-17 12:53:14 +02001491 gfar_free_rx_queues(priv);
1492 gfar_free_tx_queues(priv);
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001493 free_gfar_dev(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494
1495 return 0;
1496}
1497
Scott Woodd87eb122008-07-11 18:04:45 -05001498#ifdef CONFIG_PM
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001499
Claudiu Manoil3e905b82015-10-05 17:19:59 +03001500static void __gfar_filer_disable(struct gfar_private *priv)
1501{
1502 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1503 u32 temp;
1504
1505 temp = gfar_read(&regs->rctrl);
1506 temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT);
1507 gfar_write(&regs->rctrl, temp);
1508}
1509
1510static void __gfar_filer_enable(struct gfar_private *priv)
1511{
1512 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1513 u32 temp;
1514
1515 temp = gfar_read(&regs->rctrl);
1516 temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
1517 gfar_write(&regs->rctrl, temp);
1518}
1519
1520/* Filer rules implementing wol capabilities */
1521static void gfar_filer_config_wol(struct gfar_private *priv)
1522{
1523 unsigned int i;
1524 u32 rqfcr;
1525
1526 __gfar_filer_disable(priv);
1527
1528 /* clear the filer table, reject any packet by default */
1529 rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH;
1530 for (i = 0; i <= MAX_FILER_IDX; i++)
1531 gfar_write_filer(priv, i, rqfcr, 0);
1532
1533 i = 0;
1534 if (priv->wol_opts & GFAR_WOL_FILER_UCAST) {
1535 /* unicast packet, accept it */
1536 struct net_device *ndev = priv->ndev;
1537 /* get the default rx queue index */
1538 u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex;
1539 u32 dest_mac_addr = (ndev->dev_addr[0] << 16) |
1540 (ndev->dev_addr[1] << 8) |
1541 ndev->dev_addr[2];
1542
1543 rqfcr = (qindex << 10) | RQFCR_AND |
1544 RQFCR_CMP_EXACT | RQFCR_PID_DAH;
1545
1546 gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
1547
1548 dest_mac_addr = (ndev->dev_addr[3] << 16) |
1549 (ndev->dev_addr[4] << 8) |
1550 ndev->dev_addr[5];
1551 rqfcr = (qindex << 10) | RQFCR_GPI |
1552 RQFCR_CMP_EXACT | RQFCR_PID_DAL;
1553 gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
1554 }
1555
1556 __gfar_filer_enable(priv);
1557}
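
/* Illustrative sketch, not part of this excerpt: gfar_write_filer() (a
 * gianfar.h helper) programs one rule of the filer table walked above by
 * latching the rule index into RQFAR and then writing the control and
 * property registers.  Assumed form, shown for reference:
 */
static inline void gfar_write_filer_sketch(struct gfar_private *priv,
					   unsigned int far, u32 fcr, u32 fpr)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;

	gfar_write(&regs->rqfar, far);	/* select the table entry */
	gfar_write(&regs->rqfcr, fcr);	/* rule control (RQFCR_*) */
	gfar_write(&regs->rqfpr, fpr);	/* rule property/match value */
}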
1558
1559static void gfar_filer_restore_table(struct gfar_private *priv)
1560{
1561 u32 rqfcr, rqfpr;
1562 unsigned int i;
1563
1564 __gfar_filer_disable(priv);
1565
1566 for (i = 0; i <= MAX_FILER_IDX; i++) {
1567 rqfcr = priv->ftp_rqfcr[i];
1568 rqfpr = priv->ftp_rqfpr[i];
1569 gfar_write_filer(priv, i, rqfcr, rqfpr);
1570 }
1571
1572 __gfar_filer_enable(priv);
1573}
1574
1575/* gfar_start() for Rx only and with the FGPI filer interrupt enabled */
1576static void gfar_start_wol_filer(struct gfar_private *priv)
1577{
1578 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1579 u32 tempval;
1580 int i = 0;
1581
1582 /* Enable Rx hw queues */
1583 gfar_write(&regs->rqueue, priv->rqueue);
1584
1585 /* Initialize DMACTRL to have WWR and WOP */
1586 tempval = gfar_read(&regs->dmactrl);
1587 tempval |= DMACTRL_INIT_SETTINGS;
1588 gfar_write(&regs->dmactrl, tempval);
1589
1590 /* Make sure we aren't stopped */
1591 tempval = gfar_read(&regs->dmactrl);
1592 tempval &= ~DMACTRL_GRS;
1593 gfar_write(&regs->dmactrl, tempval);
1594
1595 for (i = 0; i < priv->num_grps; i++) {
1596 regs = priv->gfargrp[i].regs;
1597 /* Clear RHLT, so that the DMA starts polling now */
1598 gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
1599 /* enable the Filer General Purpose Interrupt */
1600 gfar_write(&regs->imask, IMASK_FGPI);
1601 }
1602
1603 /* Enable Rx DMA */
1604 tempval = gfar_read(&regs->maccfg1);
1605 tempval |= MACCFG1_RX_EN;
1606 gfar_write(&regs->maccfg1, tempval);
1607}
1608
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001609static int gfar_suspend(struct device *dev)
Scott Woodd87eb122008-07-11 18:04:45 -05001610{
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001611 struct gfar_private *priv = dev_get_drvdata(dev);
1612 struct net_device *ndev = priv->ndev;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001613 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Scott Woodd87eb122008-07-11 18:04:45 -05001614 u32 tempval;
Claudiu Manoil3e905b82015-10-05 17:19:59 +03001615 u16 wol = priv->wol_opts;
Scott Woodd87eb122008-07-11 18:04:45 -05001616
Claudiu Manoil614b4242015-07-31 18:38:32 +03001617 if (!netif_running(ndev))
1618 return 0;
1619
1620 disable_napi(priv);
1621 netif_tx_lock(ndev);
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001622 netif_device_detach(ndev);
Claudiu Manoil614b4242015-07-31 18:38:32 +03001623 netif_tx_unlock(ndev);
Scott Woodd87eb122008-07-11 18:04:45 -05001624
Claudiu Manoil614b4242015-07-31 18:38:32 +03001625 gfar_halt(priv);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001626
Claudiu Manoil3e905b82015-10-05 17:19:59 +03001627 if (wol & GFAR_WOL_MAGIC) {
Claudiu Manoil614b4242015-07-31 18:38:32 +03001628 /* Enable interrupt on Magic Packet */
1629 gfar_write(&regs->imask, IMASK_MAG);
Scott Woodd87eb122008-07-11 18:04:45 -05001630
Claudiu Manoil614b4242015-07-31 18:38:32 +03001631 /* Enable Magic Packet mode */
1632 tempval = gfar_read(&regs->maccfg2);
1633 tempval |= MACCFG2_MPEN;
1634 gfar_write(&regs->maccfg2, tempval);
Scott Woodd87eb122008-07-11 18:04:45 -05001635
Claudiu Manoil614b4242015-07-31 18:38:32 +03001636 /* re-enable the Rx block */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001637 tempval = gfar_read(&regs->maccfg1);
Claudiu Manoil614b4242015-07-31 18:38:32 +03001638 tempval |= MACCFG1_RX_EN;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001639 gfar_write(&regs->maccfg1, tempval);
Scott Woodd87eb122008-07-11 18:04:45 -05001640
Claudiu Manoil3e905b82015-10-05 17:19:59 +03001641 } else if (wol & GFAR_WOL_FILER_UCAST) {
1642 gfar_filer_config_wol(priv);
1643 gfar_start_wol_filer(priv);
1644
Claudiu Manoil614b4242015-07-31 18:38:32 +03001645 } else {
Philippe Reynes4c4a6b02016-05-16 01:30:08 +02001646 phy_stop(ndev->phydev);
Scott Woodd87eb122008-07-11 18:04:45 -05001647 }
1648
1649 return 0;
1650}
1651
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001652static int gfar_resume(struct device *dev)
Scott Woodd87eb122008-07-11 18:04:45 -05001653{
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001654 struct gfar_private *priv = dev_get_drvdata(dev);
1655 struct net_device *ndev = priv->ndev;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001656 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Scott Woodd87eb122008-07-11 18:04:45 -05001657 u32 tempval;
Claudiu Manoil3e905b82015-10-05 17:19:59 +03001658 u16 wol = priv->wol_opts;
Scott Woodd87eb122008-07-11 18:04:45 -05001659
Claudiu Manoil614b4242015-07-31 18:38:32 +03001660 if (!netif_running(ndev))
Scott Woodd87eb122008-07-11 18:04:45 -05001661 return 0;
Scott Woodd87eb122008-07-11 18:04:45 -05001662
Claudiu Manoil3e905b82015-10-05 17:19:59 +03001663 if (wol & GFAR_WOL_MAGIC) {
Claudiu Manoil614b4242015-07-31 18:38:32 +03001664 /* Disable Magic Packet mode */
1665 tempval = gfar_read(&regs->maccfg2);
1666 tempval &= ~MACCFG2_MPEN;
1667 gfar_write(&regs->maccfg2, tempval);
Claudiu Manoil3e905b82015-10-05 17:19:59 +03001668
1669 } else if (wol & GFAR_WOL_FILER_UCAST) {
1670 /* need to stop rx only, tx is already down */
1671 gfar_halt(priv);
1672 gfar_filer_restore_table(priv);
1673
Claudiu Manoil614b4242015-07-31 18:38:32 +03001674 } else {
Philippe Reynes4c4a6b02016-05-16 01:30:08 +02001675 phy_start(ndev->phydev);
Claudiu Manoil614b4242015-07-31 18:38:32 +03001676 }
Scott Woodd87eb122008-07-11 18:04:45 -05001677
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001678 gfar_start(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001679
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001680 netif_device_attach(ndev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001681 enable_napi(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001682
1683 return 0;
1684}
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001685
1686static int gfar_restore(struct device *dev)
1687{
1688 struct gfar_private *priv = dev_get_drvdata(dev);
1689 struct net_device *ndev = priv->ndev;
1690
Wang Dongsheng103cdd12012-11-09 04:43:51 +00001691 if (!netif_running(ndev)) {
1692 netif_device_attach(ndev);
1693
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001694 return 0;
Wang Dongsheng103cdd12012-11-09 04:43:51 +00001695 }
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001696
Claudiu Manoil76f31e82015-07-13 16:22:03 +03001697 gfar_init_bds(ndev);
Claudiu Manoil1eb8f7a2012-11-08 22:11:41 +00001698
Claudiu Manoila328ac92014-02-24 12:13:42 +02001699 gfar_mac_reset(priv);
1700
1701 gfar_init_tx_rx_base(priv);
1702
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001703 gfar_start(priv);
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001704
1705 priv->oldlink = 0;
1706 priv->oldspeed = 0;
1707 priv->oldduplex = -1;
1708
Philippe Reynes4c4a6b02016-05-16 01:30:08 +02001709 if (ndev->phydev)
1710 phy_start(ndev->phydev);
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001711
1712 netif_device_attach(ndev);
Anton Vorontsov5ea681d2009-11-10 14:11:05 +00001713 enable_napi(priv);
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001714
1715 return 0;
1716}
1717
Arvind Yadavee272442017-06-29 11:26:06 +05301718static const struct dev_pm_ops gfar_pm_ops = {
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001719 .suspend = gfar_suspend,
1720 .resume = gfar_resume,
1721 .freeze = gfar_suspend,
1722 .thaw = gfar_resume,
1723 .restore = gfar_restore,
1724};
1725
1726#define GFAR_PM_OPS (&gfar_pm_ops)
1727
Scott Woodd87eb122008-07-11 18:04:45 -05001728#else
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001729
1730#define GFAR_PM_OPS NULL
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001731
Scott Woodd87eb122008-07-11 18:04:45 -05001732#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001733
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001734/* Reads the controller's registers to determine what interface
1735 * connects it to the PHY.
1736 */
1737static phy_interface_t gfar_get_interface(struct net_device *dev)
1738{
1739 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001740 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001741 u32 ecntrl;
1742
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001743 ecntrl = gfar_read(&regs->ecntrl);
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001744
1745 if (ecntrl & ECNTRL_SGMII_MODE)
1746 return PHY_INTERFACE_MODE_SGMII;
1747
1748 if (ecntrl & ECNTRL_TBI_MODE) {
1749 if (ecntrl & ECNTRL_REDUCED_MODE)
1750 return PHY_INTERFACE_MODE_RTBI;
1751 else
1752 return PHY_INTERFACE_MODE_TBI;
1753 }
1754
1755 if (ecntrl & ECNTRL_REDUCED_MODE) {
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001756 if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001757 return PHY_INTERFACE_MODE_RMII;
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001758		} else {
Andy Flemingb31a1d82008-12-16 15:29:15 -08001760 phy_interface_t interface = priv->interface;
Andy Fleming7132ab72007-07-11 11:43:07 -05001761
Jan Ceuleers0977f812012-06-05 03:42:12 +00001762 /* This isn't autodetected right now, so it must
Andy Fleming7132ab72007-07-11 11:43:07 -05001763 * be set by the device tree or platform code.
1764 */
1765 if (interface == PHY_INTERFACE_MODE_RGMII_ID)
1766 return PHY_INTERFACE_MODE_RGMII_ID;
1767
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001768 return PHY_INTERFACE_MODE_RGMII;
Andy Fleming7132ab72007-07-11 11:43:07 -05001769 }
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001770 }
1771
Andy Flemingb31a1d82008-12-16 15:29:15 -08001772 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001773 return PHY_INTERFACE_MODE_GMII;
1774
1775 return PHY_INTERFACE_MODE_MII;
1776}
1777
1778
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001779/* Initializes driver's PHY state, and attaches to the PHY.
1780 * Returns 0 on success.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001781 */
1782static int init_phy(struct net_device *dev)
1783{
Andrew Lunn3c1bcc82018-11-10 23:43:33 +01001784 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
Linus Torvalds1da177e2005-04-16 15:20:36 -07001785 struct gfar_private *priv = netdev_priv(dev);
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001786 phy_interface_t interface;
Philippe Reynes4c4a6b02016-05-16 01:30:08 +02001787 struct phy_device *phydev;
Claudiu Manoilb6b5e8a2017-12-07 18:44:23 +02001788 struct ethtool_eee edata;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001789
Andrew Lunn3c1bcc82018-11-10 23:43:33 +01001790 linkmode_set_bit_array(phy_10_100_features_array,
1791 ARRAY_SIZE(phy_10_100_features_array),
1792 mask);
1793 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
1794 linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
1795 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
1796 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);
1797
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798 priv->oldlink = 0;
1799 priv->oldspeed = 0;
1800 priv->oldduplex = -1;
1801
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001802 interface = gfar_get_interface(dev);
1803
Philippe Reynes4c4a6b02016-05-16 01:30:08 +02001804 phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
1805 interface);
1806 if (!phydev) {
Anton Vorontsov1db780f2009-07-16 21:31:42 +00001807 dev_err(&dev->dev, "could not attach to PHY\n");
1808 return -ENODEV;
Grant Likelyfe192a42009-04-25 12:53:12 +00001809 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001810
Kapil Junejad3c12872007-05-11 18:25:11 -05001811 if (interface == PHY_INTERFACE_MODE_SGMII)
1812 gfar_configure_serdes(dev);
1813
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001814 /* Remove any features not supported by the controller */
Andrew Lunn3c1bcc82018-11-10 23:43:33 +01001815 linkmode_and(phydev->supported, phydev->supported, mask);
1816 linkmode_copy(phydev->advertising, phydev->supported);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001817
Andrew Lunnaf8d9bb2018-09-12 01:53:15 +02001818 /* Add support for flow control */
1819 phy_support_asym_pause(phydev);
Pavaluca Matei-B46610cf987af2014-10-27 10:42:42 +02001820
Claudiu Manoilb6b5e8a2017-12-07 18:44:23 +02001821 /* disable EEE autoneg, EEE not supported by eTSEC */
1822 memset(&edata, 0, sizeof(struct ethtool_eee));
1823 phy_ethtool_set_eee(phydev, &edata);
1824
Linus Torvalds1da177e2005-04-16 15:20:36 -07001825 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826}
1827
Jan Ceuleers0977f812012-06-05 03:42:12 +00001828/* Initialize TBI PHY interface for communicating with the
Paul Gortmakerd0313582008-04-17 00:08:10 -04001829 * SERDES lynx PHY on the chip. We communicate with this PHY
1830 * through the MDIO bus on each controller, treating it as a
1831 * "normal" PHY at the address found in the TBIPA register. We assume
1832 * that the TBIPA register is valid. Either the MDIO bus code will set
1833 * it to a value that doesn't conflict with other PHYs on the bus, or the
1834 * value doesn't matter, as there are no other PHYs on the bus.
1835 */
Kapil Junejad3c12872007-05-11 18:25:11 -05001836static void gfar_configure_serdes(struct net_device *dev)
1837{
1838 struct gfar_private *priv = netdev_priv(dev);
Grant Likelyfe192a42009-04-25 12:53:12 +00001839 struct phy_device *tbiphy;
Trent Piephoc1324192008-10-30 18:17:06 -07001840
Grant Likelyfe192a42009-04-25 12:53:12 +00001841 if (!priv->tbi_node) {
1842 dev_warn(&dev->dev, "error: SGMII mode requires that the "
1843 "device tree specify a tbi-handle\n");
1844 return;
1845 }
1846
1847 tbiphy = of_phy_find_device(priv->tbi_node);
1848 if (!tbiphy) {
1849 dev_err(&dev->dev, "error: Could not get TBI device\n");
Andy Flemingb31a1d82008-12-16 15:29:15 -08001850 return;
1851 }
Kapil Junejad3c12872007-05-11 18:25:11 -05001852
Jan Ceuleers0977f812012-06-05 03:42:12 +00001853 /* If the link is already up, we must already be ok, and don't need to
Trent Piephobdb59f92008-10-30 18:17:07 -07001854 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
1855 * everything for us? Resetting it takes the link down and requires
1856 * several seconds for it to come back.
1857 */
Russell King38737e42015-09-24 20:36:28 +01001858 if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
Andrew Lunne5a03bf2016-01-06 20:11:16 +01001859 put_device(&tbiphy->mdio.dev);
Andy Flemingb31a1d82008-12-16 15:29:15 -08001860 return;
Russell King38737e42015-09-24 20:36:28 +01001861 }
Kapil Junejad3c12872007-05-11 18:25:11 -05001862
Paul Gortmakerd0313582008-04-17 00:08:10 -04001863	/* Single clk mode, mii mode off (for serdes communication) */
Grant Likelyfe192a42009-04-25 12:53:12 +00001864 phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
Kapil Junejad3c12872007-05-11 18:25:11 -05001865
Grant Likelyfe192a42009-04-25 12:53:12 +00001866 phy_write(tbiphy, MII_ADVERTISE,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001867 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
1868 ADVERTISE_1000XPSE_ASYM);
Kapil Junejad3c12872007-05-11 18:25:11 -05001869
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001870 phy_write(tbiphy, MII_BMCR,
1871 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
1872 BMCR_SPEED1000);
Russell King04d53b22015-09-24 20:36:18 +01001873
Andrew Lunne5a03bf2016-01-06 20:11:16 +01001874 put_device(&tbiphy->mdio.dev);
Kapil Junejad3c12872007-05-11 18:25:11 -05001875}
1876
Anton Vorontsov511d9342010-06-30 06:39:15 +00001877static int __gfar_is_rx_idle(struct gfar_private *priv)
1878{
1879 u32 res;
1880
Jan Ceuleers0977f812012-06-05 03:42:12 +00001881	/* Normally TSEC should not hang on GRS commands, so we should
Anton Vorontsov511d9342010-06-30 06:39:15 +00001882 * actually wait for IEVENT_GRSC flag.
1883 */
Claudiu Manoilad3660c2013-10-09 20:20:40 +03001884 if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
Anton Vorontsov511d9342010-06-30 06:39:15 +00001885 return 0;
1886
Jan Ceuleers0977f812012-06-05 03:42:12 +00001887 /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
Anton Vorontsov511d9342010-06-30 06:39:15 +00001888 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
1889 * and the Rx can be safely reset.
1890 */
1891 res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
1892 res &= 0x7f807f80;
1893 if ((res & 0xffff) == (res >> 16))
1894 return 1;
1895
1896 return 0;
1897}
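
/* On the arithmetic above: the 0x7f807f80 mask keeps the same 8-bit field
 * in each 16-bit half of the register, and the final compare checks that
 * the two halves match -- the condition the comment calls "bits 7-14
 * equal bits 23-30".
 */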
Kumar Gala0bbaf062005-06-20 10:54:21 -05001898
1899/* Halt the receive and transmit queues */
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001900static void gfar_halt_nodisable(struct gfar_private *priv)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901{
Claudiu Manoilefeddce2014-02-17 12:53:17 +02001902 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001903 u32 tempval;
Claudiu Manoila4feee82014-10-07 10:44:34 +03001904 unsigned int timeout;
1905 int stopped;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001906
Claudiu Manoilefeddce2014-02-17 12:53:17 +02001907 gfar_ints_disable(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908
Claudiu Manoila4feee82014-10-07 10:44:34 +03001909 if (gfar_is_dma_stopped(priv))
1910 return;
1911
Linus Torvalds1da177e2005-04-16 15:20:36 -07001912 /* Stop the DMA, and wait for it to stop */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001913 tempval = gfar_read(&regs->dmactrl);
Claudiu Manoila4feee82014-10-07 10:44:34 +03001914 tempval |= (DMACTRL_GRS | DMACTRL_GTS);
1915 gfar_write(&regs->dmactrl, tempval);
Anton Vorontsov511d9342010-06-30 06:39:15 +00001916
Claudiu Manoila4feee82014-10-07 10:44:34 +03001917retry:
1918 timeout = 1000;
1919 while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
1920 cpu_relax();
1921 timeout--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001922 }
Claudiu Manoila4feee82014-10-07 10:44:34 +03001923
1924 if (!timeout)
1925 stopped = gfar_is_dma_stopped(priv);
1926
1927 if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
1928 !__gfar_is_rx_idle(priv))
1929 goto retry;
Scott Woodd87eb122008-07-11 18:04:45 -05001930}
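
/* Illustrative sketch, not part of this excerpt: the completion predicate
 * polled above.  A graceful stop is considered done once both the GRSC
 * (Rx) and GTSC (Tx) bits latch in IEVENT; assumed form of the gianfar.h
 * helper:
 */
static inline bool gfar_is_dma_stopped_sketch(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 done = IEVENT_GRSC | IEVENT_GTSC;

	return (gfar_read(&regs->ievent) & done) == done;
}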
Scott Woodd87eb122008-07-11 18:04:45 -05001931
1932/* Halt the receive and transmit queues */
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001933void gfar_halt(struct gfar_private *priv)
Scott Woodd87eb122008-07-11 18:04:45 -05001934{
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001935 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Scott Woodd87eb122008-07-11 18:04:45 -05001936 u32 tempval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001937
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001938	/* Disable the Rx/Tx hw queues */
1939 gfar_write(&regs->rqueue, 0);
1940 gfar_write(&regs->tqueue, 0);
Scott Wood2a54adc2008-08-12 15:10:46 -05001941
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001942 mdelay(10);
1943
1944 gfar_halt_nodisable(priv);
1945
1946 /* Disable Rx/Tx DMA */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001947 tempval = gfar_read(&regs->maccfg1);
1948 tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
1949 gfar_write(&regs->maccfg1, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001950}
1951
1952void stop_gfar(struct net_device *dev)
1953{
1954 struct gfar_private *priv = netdev_priv(dev);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001955
Claudiu Manoil08511332014-02-24 12:13:45 +02001956 netif_tx_stop_all_queues(dev);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001957
Peter Zijlstra4e857c52014-03-17 18:06:10 +01001958 smp_mb__before_atomic();
Claudiu Manoil08511332014-02-24 12:13:45 +02001959 set_bit(GFAR_DOWN, &priv->state);
Peter Zijlstra4e857c52014-03-17 18:06:10 +01001960 smp_mb__after_atomic();
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001961
Claudiu Manoil08511332014-02-24 12:13:45 +02001962 disable_napi(priv);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001963
Claudiu Manoil08511332014-02-24 12:13:45 +02001964 /* disable ints and gracefully shut down Rx/Tx DMA */
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001965 gfar_halt(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001966
Philippe Reynes4c4a6b02016-05-16 01:30:08 +02001967 phy_stop(dev->phydev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969 free_skb_resources(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970}
1971
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001972static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001974 struct txbd8 *txbdp;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001975 struct gfar_private *priv = netdev_priv(tx_queue->dev);
Dai Haruki4669bc92008-12-17 16:51:04 -08001976 int i, j;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001978 txbdp = tx_queue->tx_bd_base;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001980 for (i = 0; i < tx_queue->tx_ring_size; i++) {
1981 if (!tx_queue->tx_skbuff[i])
Dai Haruki4669bc92008-12-17 16:51:04 -08001982 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983
Claudiu Manoila7312d52015-03-13 10:36:28 +02001984 dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
1985 be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
Dai Haruki4669bc92008-12-17 16:51:04 -08001986 txbdp->lstatus = 0;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001987 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001988 j++) {
Dai Haruki4669bc92008-12-17 16:51:04 -08001989 txbdp++;
Claudiu Manoila7312d52015-03-13 10:36:28 +02001990 dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
1991 be16_to_cpu(txbdp->length),
1992 DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993 }
Andy Flemingad5da7a2008-05-07 13:20:55 -05001994 txbdp++;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001995 dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1996 tx_queue->tx_skbuff[i] = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001997 }
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001998 kfree(tx_queue->tx_skbuff);
Claudiu Manoil1eb8f7a2012-11-08 22:11:41 +00001999 tx_queue->tx_skbuff = NULL;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002000}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002001
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002002static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
2003{
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002004 int i;
2005
Claudiu Manoil75354142015-07-13 16:22:06 +03002006 struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
2007
2008 if (rx_queue->skb)
2009 dev_kfree_skb(rx_queue->skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002010
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002011 for (i = 0; i < rx_queue->rx_ring_size; i++) {
Claudiu Manoil75354142015-07-13 16:22:06 +03002012 struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
2013
Anton Vorontsove69edd22009-10-12 06:00:30 +00002014 rxbdp->lstatus = 0;
2015 rxbdp->bufPtr = 0;
2016 rxbdp++;
Claudiu Manoil75354142015-07-13 16:22:06 +03002017
2018 if (!rxb->page)
2019 continue;
2020
Arseny Solokha4af0e5b2017-01-29 19:52:20 +07002021 dma_unmap_page(rx_queue->dev, rxb->dma,
2022 PAGE_SIZE, DMA_FROM_DEVICE);
Claudiu Manoil75354142015-07-13 16:22:06 +03002023 __free_page(rxb->page);
2024
2025 rxb->page = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026 }
Claudiu Manoil75354142015-07-13 16:22:06 +03002027
2028 kfree(rx_queue->rx_buff);
2029 rx_queue->rx_buff = NULL;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002030}
Anton Vorontsove69edd22009-10-12 06:00:30 +00002031
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002032/* If there are any tx skbs or rx skbs still around, free them.
Jan Ceuleers0977f812012-06-05 03:42:12 +00002033 * Then free tx_skbuff and rx_skbuff
2034 */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002035static void free_skb_resources(struct gfar_private *priv)
2036{
2037 struct gfar_priv_tx_q *tx_queue = NULL;
2038 struct gfar_priv_rx_q *rx_queue = NULL;
2039 int i;
2040
2041 /* Go through all the buffer descriptors and free their data buffers */
2042 for (i = 0; i < priv->num_tx_queues; i++) {
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002043 struct netdev_queue *txq;
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002044
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002045 tx_queue = priv->tx_queue[i];
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002046 txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002047 if (tx_queue->tx_skbuff)
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002048 free_skb_tx_queue(tx_queue);
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002049 netdev_tx_reset_queue(txq);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002050 }
2051
2052 for (i = 0; i < priv->num_rx_queues; i++) {
2053 rx_queue = priv->rx_queue[i];
Claudiu Manoil75354142015-07-13 16:22:06 +03002054 if (rx_queue->rx_buff)
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002055 free_skb_rx_queue(rx_queue);
2056 }
2057
Claudiu Manoil369ec162013-02-14 05:00:02 +00002058 dma_free_coherent(priv->dev,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002059 sizeof(struct txbd8) * priv->total_tx_ring_size +
2060 sizeof(struct rxbd8) * priv->total_rx_ring_size,
2061 priv->tx_queue[0]->tx_bd_base,
2062 priv->tx_queue[0]->tx_bd_dma_base);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002063}
2064
Claudiu Manoilc10650b2014-02-17 12:53:18 +02002065void gfar_start(struct gfar_private *priv)
Kumar Gala0bbaf062005-06-20 10:54:21 -05002066{
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002067 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002068 u32 tempval;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002069 int i = 0;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002070
Claudiu Manoilc10650b2014-02-17 12:53:18 +02002071 /* Enable Rx/Tx hw queues */
2072 gfar_write(&regs->rqueue, priv->rqueue);
2073 gfar_write(&regs->tqueue, priv->tqueue);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002074
2075 /* Initialize DMACTRL to have WWR and WOP */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002076 tempval = gfar_read(&regs->dmactrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002077 tempval |= DMACTRL_INIT_SETTINGS;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002078 gfar_write(&regs->dmactrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002079
Kumar Gala0bbaf062005-06-20 10:54:21 -05002080 /* Make sure we aren't stopped */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002081 tempval = gfar_read(&regs->dmactrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002082 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002083 gfar_write(&regs->dmactrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002084
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002085 for (i = 0; i < priv->num_grps; i++) {
2086 regs = priv->gfargrp[i].regs;
2087 /* Clear THLT/RHLT, so that the DMA starts polling now */
2088 gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
2089 gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002090 }
Dai Haruki12dea572008-12-16 15:30:20 -08002091
Claudiu Manoilc10650b2014-02-17 12:53:18 +02002092 /* Enable Rx/Tx DMA */
2093 tempval = gfar_read(&regs->maccfg1);
2094 tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
2095 gfar_write(&regs->maccfg1, tempval);
2096
Claudiu Manoilefeddce2014-02-17 12:53:17 +02002097 gfar_ints_enable(priv);
2098
Florian Westphal860e9532016-05-03 16:33:13 +02002099 netif_trans_update(priv->ndev); /* prevent tx timeout */
Kumar Gala0bbaf062005-06-20 10:54:21 -05002100}
2101
Claudiu Manoil80ec3962014-02-24 12:13:44 +02002102static void free_grp_irqs(struct gfar_priv_grp *grp)
2103{
2104 free_irq(gfar_irq(grp, TX)->irq, grp);
2105 free_irq(gfar_irq(grp, RX)->irq, grp);
2106 free_irq(gfar_irq(grp, ER)->irq, grp);
2107}
2108
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002109static int register_grp_irqs(struct gfar_priv_grp *grp)
2110{
2111 struct gfar_private *priv = grp->priv;
2112 struct net_device *dev = priv->ndev;
Anton Vorontsovccc05c62009-10-12 06:00:26 +00002113 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002114
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115 /* If the device has multiple interrupts, register for
Jan Ceuleers0977f812012-06-05 03:42:12 +00002116 * them. Otherwise, only register for the one
2117 */
Andy Flemingb31a1d82008-12-16 15:29:15 -08002118 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05002119 /* Install our interrupt handlers for Error,
Jan Ceuleers0977f812012-06-05 03:42:12 +00002120 * Transmit, and Receive
2121 */
Sudeep Hollad5b8d642015-09-21 16:47:09 +01002122 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002123 gfar_irq(grp, ER)->name, grp);
2124 if (err < 0) {
Joe Perches59deab22011-06-14 08:57:47 +00002125 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002126 gfar_irq(grp, ER)->irq);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002127
Julia Lawall2145f1a2010-08-05 10:26:20 +00002128 goto err_irq_fail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002129 }
Sudeep Hollad5b8d642015-09-21 16:47:09 +01002130 enable_irq_wake(gfar_irq(grp, ER)->irq);
2131
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002132 err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
2133 gfar_irq(grp, TX)->name, grp);
2134 if (err < 0) {
Joe Perches59deab22011-06-14 08:57:47 +00002135 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002136 gfar_irq(grp, TX)->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002137 goto tx_irq_fail;
2138 }
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002139 err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
2140 gfar_irq(grp, RX)->name, grp);
2141 if (err < 0) {
Joe Perches59deab22011-06-14 08:57:47 +00002142 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002143 gfar_irq(grp, RX)->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144 goto rx_irq_fail;
2145 }
Claudiu Manoil3e905b82015-10-05 17:19:59 +03002146 enable_irq_wake(gfar_irq(grp, RX)->irq);
2147
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148 } else {
Sudeep Hollad5b8d642015-09-21 16:47:09 +01002149 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002150 gfar_irq(grp, TX)->name, grp);
2151 if (err < 0) {
Joe Perches59deab22011-06-14 08:57:47 +00002152 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002153 gfar_irq(grp, TX)->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002154 goto err_irq_fail;
2155 }
Sudeep Hollad5b8d642015-09-21 16:47:09 +01002156 enable_irq_wake(gfar_irq(grp, TX)->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002157 }
2158
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002159 return 0;
2160
2161rx_irq_fail:
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002162 free_irq(gfar_irq(grp, TX)->irq, grp);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002163tx_irq_fail:
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002164 free_irq(gfar_irq(grp, ER)->irq, grp);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002165err_irq_fail:
2166 return err;
2167
2168}
2169
Claudiu Manoil80ec3962014-02-24 12:13:44 +02002170static void gfar_free_irq(struct gfar_private *priv)
2171{
2172 int i;
2173
2174 /* Free the IRQs */
2175 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2176 for (i = 0; i < priv->num_grps; i++)
2177 free_grp_irqs(&priv->gfargrp[i]);
2178 } else {
2179 for (i = 0; i < priv->num_grps; i++)
2180 free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
2181 &priv->gfargrp[i]);
2182 }
2183}
2184
2185static int gfar_request_irq(struct gfar_private *priv)
2186{
2187 int err, i, j;
2188
2189 for (i = 0; i < priv->num_grps; i++) {
2190 err = register_grp_irqs(&priv->gfargrp[i]);
2191 if (err) {
2192 for (j = 0; j < i; j++)
2193 free_grp_irqs(&priv->gfargrp[j]);
2194 return err;
2195 }
2196 }
2197
2198 return 0;
2199}
2200
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002201/* Bring the controller up and running */
2202int startup_gfar(struct net_device *ndev)
2203{
2204 struct gfar_private *priv = netdev_priv(ndev);
Claudiu Manoil80ec3962014-02-24 12:13:44 +02002205 int err;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002206
Claudiu Manoila328ac92014-02-24 12:13:42 +02002207 gfar_mac_reset(priv);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002208
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002209 err = gfar_alloc_skb_resources(ndev);
2210 if (err)
2211 return err;
2212
Claudiu Manoila328ac92014-02-24 12:13:42 +02002213 gfar_init_tx_rx_base(priv);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002214
Peter Zijlstra4e857c52014-03-17 18:06:10 +01002215 smp_mb__before_atomic();
Claudiu Manoil08511332014-02-24 12:13:45 +02002216 clear_bit(GFAR_DOWN, &priv->state);
Peter Zijlstra4e857c52014-03-17 18:06:10 +01002217 smp_mb__after_atomic();
Claudiu Manoil08511332014-02-24 12:13:45 +02002218
2219 /* Start Rx/Tx DMA and enable the interrupts */
Claudiu Manoilc10650b2014-02-17 12:53:18 +02002220 gfar_start(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002221
Claudiu Manoil2a4eebf2015-08-13 16:50:37 +03002222 /* force link state update after mac reset */
2223 priv->oldlink = 0;
2224 priv->oldspeed = 0;
2225 priv->oldduplex = -1;
2226
Philippe Reynes4c4a6b02016-05-16 01:30:08 +02002227 phy_start(ndev->phydev);
Anton Vorontsov826aa4a2009-10-12 06:00:34 +00002228
Claudiu Manoil08511332014-02-24 12:13:45 +02002229 enable_napi(priv);
2230
2231 netif_tx_wake_all_queues(ndev);
2232
Linus Torvalds1da177e2005-04-16 15:20:36 -07002233 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002234}
2235
Jan Ceuleers0977f812012-06-05 03:42:12 +00002236/* Called when something needs to use the ethernet device
2237 * Returns 0 for success.
2238 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002239static int gfar_enet_open(struct net_device *dev)
2240{
Li Yang94e8cc32007-10-12 21:53:51 +08002241 struct gfar_private *priv = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002242 int err;
2243
Linus Torvalds1da177e2005-04-16 15:20:36 -07002244 err = init_phy(dev);
Claudiu Manoil08511332014-02-24 12:13:45 +02002245 if (err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002246 return err;
2247
Claudiu Manoil80ec3962014-02-24 12:13:44 +02002248 err = gfar_request_irq(priv);
2249 if (err)
2250 return err;
2251
Linus Torvalds1da177e2005-04-16 15:20:36 -07002252 err = startup_gfar(dev);
Claudiu Manoil08511332014-02-24 12:13:45 +02002253 if (err)
Anton Vorontsovdb0e8e32007-10-17 23:57:46 +04002254 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002255
2256 return err;
2257}
2258
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002259static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
Kumar Gala0bbaf062005-06-20 10:54:21 -05002260{
Johannes Bergd58ff352017-06-16 14:29:23 +02002261 struct txfcb *fcb = skb_push(skb, GMAC_FCB_LEN);
Kumar Gala6c31d552009-04-28 08:04:10 -07002262
2263 memset(fcb, 0, GMAC_FCB_LEN);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002264
Kumar Gala0bbaf062005-06-20 10:54:21 -05002265 return fcb;
2266}
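
/* Note on the push above: skb_push(skb, GMAC_FCB_LEN) moves skb->data
 * back 8 bytes so the frame control block sits immediately before the
 * Ethernet header.  The caller must guarantee that much headroom -- see
 * the skb_realloc_headroom() fallback in gfar_start_xmit() below.
 */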
2267
Manfred Rudigier9c4886e2012-01-09 23:26:51 +00002268static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002269 int fcb_length)
Kumar Gala0bbaf062005-06-20 10:54:21 -05002270{
Kumar Gala0bbaf062005-06-20 10:54:21 -05002271	/* If we're here, it's an IP packet with a TCP or UDP
2272 * payload. We set it to checksum, using a pseudo-header
2273 * we provide
2274 */
Jan Ceuleers3a2e16c2012-06-05 03:42:14 +00002275 u8 flags = TXFCB_DEFAULT;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002276
Jan Ceuleers0977f812012-06-05 03:42:12 +00002277 /* Tell the controller what the protocol is
2278	 * and provide the already-calculated phcs
2279 */
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07002280 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
Andy Fleming7f7f5312005-11-11 12:38:59 -06002281 flags |= TXFCB_UDP;
Claudiu Manoil26eb9372015-03-13 10:36:29 +02002282 fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
Andy Fleming7f7f5312005-11-11 12:38:59 -06002283 } else
Claudiu Manoil26eb9372015-03-13 10:36:29 +02002284 fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002285
2286 /* l3os is the distance between the start of the
2287 * frame (skb->data) and the start of the IP hdr.
2288 * l4os is the distance between the start of the
Jan Ceuleers0977f812012-06-05 03:42:12 +00002289 * l3 hdr and the l4 hdr
2290 */
Claudiu Manoil26eb9372015-03-13 10:36:29 +02002291 fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
Arnaldo Carvalho de Melocfe1fc72007-03-16 17:26:39 -03002292 fcb->l4os = skb_network_header_len(skb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002293
Andy Fleming7f7f5312005-11-11 12:38:59 -06002294 fcb->flags = flags;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002295}
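
/* Worked example for the offsets above, assuming an untagged IPv4/TCP
 * frame and fcb_length == GMAC_FCB_LEN (8): after the FCB push,
 * skb_network_offset() is 8 + ETH_HLEN = 22, so l3os = 14 (the Ethernet
 * header) and l4os = 20 (a no-options IP header).
 */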
2296
Arnd Bergmann278af572016-06-16 15:52:13 +02002297static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
Kumar Gala0bbaf062005-06-20 10:54:21 -05002298{
Andy Fleming7f7f5312005-11-11 12:38:59 -06002299 fcb->flags |= TXFCB_VLN;
Claudiu Manoil26eb9372015-03-13 10:36:29 +02002300 fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
Kumar Gala0bbaf062005-06-20 10:54:21 -05002301}
2302
Dai Haruki4669bc92008-12-17 16:51:04 -08002303static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002304 struct txbd8 *base, int ring_size)
Dai Haruki4669bc92008-12-17 16:51:04 -08002305{
2306 struct txbd8 *new_bd = bdp + stride;
2307
2308 return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
2309}
2310
2311static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002312 int ring_size)
Dai Haruki4669bc92008-12-17 16:51:04 -08002313{
2314 return skip_txbd(bdp, 1, base, ring_size);
2315}
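
/* Example of the wrap arithmetic above: with ring_size = 256 and bdp at
 * base + 255, skip_txbd(bdp, 1, base, 256) computes new_bd = base + 256,
 * which is one past the end, so it wraps back to base -- the software
 * mirror of the WRAP bit set in the last hardware descriptor.
 */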
2316
Claudiu Manoil02d88fb2013-08-05 17:20:09 +03002317/* eTSEC12: csum generation not supported for some fcb offsets */
2318static inline bool gfar_csum_errata_12(struct gfar_private *priv,
2319 unsigned long fcb_addr)
2320{
2321 return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
2322 (fcb_addr % 0x20) > 0x18);
2323}
2324
2325/* eTSEC76: csum generation for frames larger than 2500 may
2326 * cause excess delays before start of transmission
2327 */
2328static inline bool gfar_csum_errata_76(struct gfar_private *priv,
2329 unsigned int len)
2330{
2331 return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
2332 (len > 2500));
2333}
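
/* Illustrative note, not part of this excerpt: when either predicate
 * above fires, the transmit path gives up on hardware checksumming for
 * that frame, roughly:
 *
 *	__skb_pull(skb, GMAC_FCB_LEN);	// drop the just-built FCB
 *	skb_checksum_help(skb);		// checksum in software instead
 */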
2334
Jan Ceuleers0977f812012-06-05 03:42:12 +00002335/* This is called by the kernel when a frame is ready for transmission.
2336 * It is pointed to by the dev->hard_start_xmit function pointer
2337 */
YueHaibing06983aa2018-09-21 10:50:32 +08002338static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002339{
2340 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002341 struct gfar_priv_tx_q *tx_queue = NULL;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002342 struct netdev_queue *txq;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002343 struct gfar __iomem *regs = NULL;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002344 struct txfcb *fcb = NULL;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002345 struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
Dai Haruki5a5efed2008-12-16 15:34:50 -08002346 u32 lstatus;
Claudiu Manoil42f397a2016-02-23 11:48:38 +02002347 skb_frag_t *frag;
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002348 int i, rq = 0;
2349 int do_tstamp, do_csum, do_vlan;
Dai Haruki4669bc92008-12-17 16:51:04 -08002350 u32 bufaddr;
Claudiu Manoil50ad0762013-08-30 15:01:15 +03002351 unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002352
2353 rq = skb->queue_mapping;
2354 tx_queue = priv->tx_queue[rq];
2355 txq = netdev_get_tx_queue(dev, rq);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002356 base = tx_queue->tx_bd_base;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002357 regs = tx_queue->grp->regs;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002358
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002359 do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01002360 do_vlan = skb_vlan_tag_present(skb);
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002361 do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2362 priv->hwts_tx_en;
2363
2364 if (do_csum || do_vlan)
2365 fcb_len = GMAC_FCB_LEN;
2366
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002367 /* check if time stamp should be generated */
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002368 if (unlikely(do_tstamp))
2369 fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
Dai Haruki4669bc92008-12-17 16:51:04 -08002370
Li Yang5b28bea2009-03-27 15:54:30 -07002371 /* make space for additional header when fcb is needed */
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002372 if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002373 struct sk_buff *skb_new;
2374
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002375 skb_new = skb_realloc_headroom(skb, fcb_len);
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002376 if (!skb_new) {
2377 dev->stats.tx_errors++;
Eric W. Biedermanc9974ad2014-03-11 14:20:26 -07002378 dev_kfree_skb_any(skb);
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002379 return NETDEV_TX_OK;
2380 }
Manfred Rudigierdb83d132012-01-09 23:26:50 +00002381
Eric Dumazet313b0372012-07-05 11:45:13 +00002382 if (skb->sk)
2383 skb_set_owner_w(skb_new, skb->sk);
Eric W. Biedermanc9974ad2014-03-11 14:20:26 -07002384 dev_consume_skb_any(skb);
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002385 skb = skb_new;
2386 }
2387
Dai Haruki4669bc92008-12-17 16:51:04 -08002388 /* total number of fragments in the SKB */
2389 nr_frags = skb_shinfo(skb)->nr_frags;
2390
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002391 /* calculate the required number of TxBDs for this skb */
2392 if (unlikely(do_tstamp))
2393 nr_txbds = nr_frags + 2;
2394 else
2395 nr_txbds = nr_frags + 1;
2396
Dai Haruki4669bc92008-12-17 16:51:04 -08002397 /* check if there is space to queue this packet */
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002398 if (nr_txbds > tx_queue->num_txbdfree) {
Dai Haruki4669bc92008-12-17 16:51:04 -08002399 /* no space, stop the queue */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002400 netif_tx_stop_queue(txq);
Dai Haruki4669bc92008-12-17 16:51:04 -08002401 dev->stats.tx_fifo_errors++;
Dai Haruki4669bc92008-12-17 16:51:04 -08002402 return NETDEV_TX_BUSY;
2403 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002404
2405 /* Update transmit stats */
Claudiu Manoil50ad0762013-08-30 15:01:15 +03002406 bytes_sent = skb->len;
2407 tx_queue->stats.tx_bytes += bytes_sent;
2408 /* keep Tx bytes on wire for BQL accounting */
2409 GFAR_CB(skb)->bytes_sent = bytes_sent;
Eric Dumazet1ac9ad12011-01-12 12:13:14 +00002410 tx_queue->stats.tx_packets++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002411
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002412 txbdp = txbdp_start = tx_queue->cur_tx;
Claudiu Manoila7312d52015-03-13 10:36:28 +02002413 lstatus = be32_to_cpu(txbdp->lstatus);
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002414
Manfred Rudigier9c4886e2012-01-09 23:26:51 +00002415 /* Add TxPAL between FCB and frame if required */
2416 if (unlikely(do_tstamp)) {
2417 skb_push(skb, GMAC_TXPAL_LEN);
2418 memset(skb->data, 0, GMAC_TXPAL_LEN);
2419 }
2420
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002421 /* Add TxFCB if required */
2422 if (fcb_len) {
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002423 fcb = gfar_add_fcb(skb);
Claudiu Manoil02d88fb2013-08-05 17:20:09 +03002424 lstatus |= BD_LFLAG(TXBD_TOE);
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002425 }
2426
2427 /* Set up checksumming */
2428 if (do_csum) {
2429 gfar_tx_checksum(skb, fcb, fcb_len);
Claudiu Manoil02d88fb2013-08-05 17:20:09 +03002430
2431 if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
2432 unlikely(gfar_csum_errata_76(priv, skb->len))) {
Alex Dubov4363c2fdd2011-03-16 17:57:13 +00002433 __skb_pull(skb, GMAC_FCB_LEN);
2434 skb_checksum_help(skb);
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002435 if (do_vlan || do_tstamp) {
2436 /* put back a new fcb for vlan/tstamp TOE */
2437 fcb = gfar_add_fcb(skb);
2438 } else {
2439 /* Tx TOE not used */
2440 lstatus &= ~(BD_LFLAG(TXBD_TOE));
2441 fcb = NULL;
2442 }
Alex Dubov4363c2fdd2011-03-16 17:57:13 +00002443 }
Kumar Gala0bbaf062005-06-20 10:54:21 -05002444 }
2445
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002446 if (do_vlan)
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002447 gfar_tx_vlan(skb, fcb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002448
Kevin Hao0a4b5a22014-12-11 14:08:41 +08002449 bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
2450 DMA_TO_DEVICE);
2451 if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
2452 goto dma_map_err;
2453
Claudiu Manoila7312d52015-03-13 10:36:28 +02002454 txbdp_start->bufPtr = cpu_to_be32(bufaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002455
Claudiu Manoile19d0832016-02-23 11:48:37 +02002456 /* Time stamp insertion requires one additional TxBD */
2457 if (unlikely(do_tstamp))
2458 txbdp_tstamp = txbdp = next_txbd(txbdp, base,
2459 tx_queue->tx_ring_size);
2460
Claudiu Manoil48963b42016-02-23 11:48:39 +02002461 if (likely(!nr_frags)) {
Yangbo Lu9c8b0772016-06-02 17:36:28 +08002462 if (likely(!do_tstamp))
2463 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
Claudiu Manoile19d0832016-02-23 11:48:37 +02002464 } else {
2465 u32 lstatus_start = lstatus;
2466
2467 /* Place the fragment addresses and lengths into the TxBDs */
Claudiu Manoil42f397a2016-02-23 11:48:38 +02002468 frag = &skb_shinfo(skb)->frags[0];
2469 for (i = 0; i < nr_frags; i++, frag++) {
2470 unsigned int size;
2471
Claudiu Manoile19d0832016-02-23 11:48:37 +02002472 /* Point at the next BD, wrapping as needed */
2473 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2474
Claudiu Manoil42f397a2016-02-23 11:48:38 +02002475 size = skb_frag_size(frag);
Claudiu Manoile19d0832016-02-23 11:48:37 +02002476
Claudiu Manoil42f397a2016-02-23 11:48:38 +02002477 lstatus = be32_to_cpu(txbdp->lstatus) | size |
Claudiu Manoile19d0832016-02-23 11:48:37 +02002478 BD_LFLAG(TXBD_READY);
2479
2480 /* Handle the last BD specially */
2481 if (i == nr_frags - 1)
2482 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2483
Claudiu Manoil42f397a2016-02-23 11:48:38 +02002484 bufaddr = skb_frag_dma_map(priv->dev, frag, 0,
2485 size, DMA_TO_DEVICE);
Claudiu Manoile19d0832016-02-23 11:48:37 +02002486 if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
2487 goto dma_map_err;
2488
2489 /* set the TxBD length and buffer pointer */
2490 txbdp->bufPtr = cpu_to_be32(bufaddr);
2491 txbdp->lstatus = cpu_to_be32(lstatus);
2492 }
2493
2494 lstatus = lstatus_start;
2495 }
2496
Jan Ceuleers0977f812012-06-05 03:42:12 +00002497/* If time stamping is requested, one additional TxBD must be set up. The
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002498 * first TxBD points to the FCB and must have a data length of
2499 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
2500 * the full frame length.
2501 */
2502 if (unlikely(do_tstamp)) {
Claudiu Manoila7312d52015-03-13 10:36:28 +02002503 u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
2504
2505 bufaddr = be32_to_cpu(txbdp_start->bufPtr);
2506 bufaddr += fcb_len;
Claudiu Manoil48963b42016-02-23 11:48:39 +02002507
Claudiu Manoila7312d52015-03-13 10:36:28 +02002508 lstatus_ts |= BD_LFLAG(TXBD_READY) |
2509 (skb_headlen(skb) - fcb_len);
Claudiu Manoil48963b42016-02-23 11:48:39 +02002510 if (!nr_frags)
2511 lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
Claudiu Manoila7312d52015-03-13 10:36:28 +02002512
2513 txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
2514 txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002515 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
Claudiu Manoile19d0832016-02-23 11:48:37 +02002516
2517 /* Setup tx hardware time stamping */
2518 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2519 fcb->ptp = 1;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002520 } else {
2521 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
2522 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002523
Claudiu Manoil50ad0762013-08-30 15:01:15 +03002524 netdev_tx_sent_queue(txq, bytes_sent);
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002525
Claudiu Manoild55398b2014-10-07 10:44:35 +03002526 gfar_wmb();
Andy Fleming7f7f5312005-11-11 12:38:59 -06002527
Claudiu Manoila7312d52015-03-13 10:36:28 +02002528 txbdp_start->lstatus = cpu_to_be32(lstatus);
Dai Haruki4669bc92008-12-17 16:51:04 -08002529
Claudiu Manoild55398b2014-10-07 10:44:35 +03002530 gfar_wmb(); /* force lstatus write before tx_skbuff */
Anton Vorontsov0eddba52010-03-03 08:18:58 +00002531
2532 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
2533
Dai Haruki4669bc92008-12-17 16:51:04 -08002534 /* Update the current skb pointer to the next entry we will use
Jan Ceuleers0977f812012-06-05 03:42:12 +00002535 * (wrapping if necessary)
2536 */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002537 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002538 TX_RING_MOD_MASK(tx_queue->tx_ring_size);
Dai Haruki4669bc92008-12-17 16:51:04 -08002539
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002540 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
Dai Haruki4669bc92008-12-17 16:51:04 -08002541
Claudiu Manoilbc602282015-05-06 18:07:29 +03002542 /* We can work in parallel with gfar_clean_tx_ring(), except
	2543	 * when modifying num_txbdfree. Note that we didn't grab the lock
	2544	 * when reading num_txbdfree and checking for available space,
	2545	 * because outside of this function it can only grow.
2546 */
2547 spin_lock_bh(&tx_queue->txlock);
Dai Haruki4669bc92008-12-17 16:51:04 -08002548 /* reduce TxBD free count */
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002549 tx_queue->num_txbdfree -= (nr_txbds);
Claudiu Manoilbc602282015-05-06 18:07:29 +03002550 spin_unlock_bh(&tx_queue->txlock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002551
	2552	/* If the next BD still needs to be cleaned up, then the ring
Jan Ceuleers0977f812012-06-05 03:42:12 +00002553	 * is full. We need to tell the kernel to stop sending us frames.
2554 */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002555 if (!tx_queue->num_txbdfree) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002556 netif_tx_stop_queue(txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002557
Jeff Garzik09f75cd2007-10-03 17:41:50 -07002558 dev->stats.tx_fifo_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002559 }
2560
Linus Torvalds1da177e2005-04-16 15:20:36 -07002561 /* Tell the DMA to go go go */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002562 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002563
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002564 return NETDEV_TX_OK;
Kevin Hao0a4b5a22014-12-11 14:08:41 +08002565
2566dma_map_err:
2567 txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
2568 if (do_tstamp)
2569 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2570 for (i = 0; i < nr_frags; i++) {
Claudiu Manoila7312d52015-03-13 10:36:28 +02002571 lstatus = be32_to_cpu(txbdp->lstatus);
Kevin Hao0a4b5a22014-12-11 14:08:41 +08002572 if (!(lstatus & BD_LFLAG(TXBD_READY)))
2573 break;
2574
Claudiu Manoila7312d52015-03-13 10:36:28 +02002575 lstatus &= ~BD_LFLAG(TXBD_READY);
2576 txbdp->lstatus = cpu_to_be32(lstatus);
2577 bufaddr = be32_to_cpu(txbdp->bufPtr);
2578 dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length),
Kevin Hao0a4b5a22014-12-11 14:08:41 +08002579 DMA_TO_DEVICE);
2580 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2581 }
2582 gfar_wmb();
2583 dev_kfree_skb_any(skb);
2584 return NETDEV_TX_OK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002585}
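/* Editorial sketch of the descriptor chain built by gfar_start_xmit()
 * for a time stamped skb with two page fragments (nr_frags + 2 BDs):
 *
 *	txbdp_start:  FCB (+TxPAL), len = GMAC_FCB_LEN, TXBD_CRC
 *	txbdp_tstamp: linear data,  len = skb_headlen() - fcb_len
 *	BD 3:         frag 0
 *	BD 4:         frag 1,       TXBD_LAST | TXBD_INTERRUPT
 *
 * txbdp_start->lstatus is written last, with TXBD_READY, behind
 * gfar_wmb(), so the controller never sees a half-built chain.
 */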
2586
2587/* Stops the kernel queue, and halts the controller */
2588static int gfar_close(struct net_device *dev)
2589{
2590 struct gfar_private *priv = netdev_priv(dev);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002591
Sebastian Siewiorab939902008-08-19 21:12:45 +02002592 cancel_work_sync(&priv->reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002593 stop_gfar(dev);
2594
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002595 /* Disconnect from the PHY */
Philippe Reynes4c4a6b02016-05-16 01:30:08 +02002596 phy_disconnect(dev->phydev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002597
Claudiu Manoil80ec3962014-02-24 12:13:44 +02002598 gfar_free_irq(priv);
2599
Linus Torvalds1da177e2005-04-16 15:20:36 -07002600 return 0;
2601}
2602
Linus Torvalds1da177e2005-04-16 15:20:36 -07002603/* Writes dev->dev_addr into the controller's MAC address registers. */
Andy Flemingf162b9d2008-05-02 13:00:30 -05002604static int gfar_set_mac_address(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002605{
Andy Fleming7f7f5312005-11-11 12:38:59 -06002606 gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002607
2608 return 0;
2609}
2610
Linus Torvalds1da177e2005-04-16 15:20:36 -07002611static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2612{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002613 struct gfar_private *priv = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002614
Claudiu Manoil08511332014-02-24 12:13:45 +02002615 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2616 cpu_relax();
2617
Claudiu Manoil88302642014-02-24 12:13:43 +02002618 if (dev->flags & IFF_UP)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002619 stop_gfar(dev);
2620
Linus Torvalds1da177e2005-04-16 15:20:36 -07002621 dev->mtu = new_mtu;
2622
Claudiu Manoil88302642014-02-24 12:13:43 +02002623 if (dev->flags & IFF_UP)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002624 startup_gfar(dev);
2625
Claudiu Manoil08511332014-02-24 12:13:45 +02002626 clear_bit_unlock(GFAR_RESETTING, &priv->state);
2627
Linus Torvalds1da177e2005-04-16 15:20:36 -07002628 return 0;
2629}
2630
Claudiu Manoil08511332014-02-24 12:13:45 +02002631void reset_gfar(struct net_device *ndev)
2632{
2633 struct gfar_private *priv = netdev_priv(ndev);
2634
2635 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2636 cpu_relax();
2637
2638 stop_gfar(ndev);
2639 startup_gfar(ndev);
2640
2641 clear_bit_unlock(GFAR_RESETTING, &priv->state);
2642}
2643
Sebastian Siewiorab939902008-08-19 21:12:45 +02002644/* gfar_reset_task gets scheduled when a packet has not been
Linus Torvalds1da177e2005-04-16 15:20:36 -07002645 * transmitted after a set amount of time.
	2646	 * For now, assume that clearing out all the structures and
Sebastian Siewiorab939902008-08-19 21:12:45 +02002647 * starting over will fix the problem.
2648 */
2649static void gfar_reset_task(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002650{
Sebastian Siewiorab939902008-08-19 21:12:45 +02002651 struct gfar_private *priv = container_of(work, struct gfar_private,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002652 reset_task);
Claudiu Manoil08511332014-02-24 12:13:45 +02002653 reset_gfar(priv->ndev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002654}
2655
Sebastian Siewiorab939902008-08-19 21:12:45 +02002656static void gfar_timeout(struct net_device *dev)
2657{
2658 struct gfar_private *priv = netdev_priv(dev);
2659
2660 dev->stats.tx_errors++;
2661 schedule_work(&priv->reset_task);
2662}
2663
Linus Torvalds1da177e2005-04-16 15:20:36 -07002664/* Interrupt Handler for Transmit complete */
Claudiu Manoilc233cf402013-03-19 07:40:02 +00002665static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002666{
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002667 struct net_device *dev = tx_queue->dev;
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002668 struct netdev_queue *txq;
Dai Harukid080cd62008-04-09 19:37:51 -05002669 struct gfar_private *priv = netdev_priv(dev);
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002670 struct txbd8 *bdp, *next = NULL;
Dai Haruki4669bc92008-12-17 16:51:04 -08002671 struct txbd8 *lbdp = NULL;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002672 struct txbd8 *base = tx_queue->tx_bd_base;
Dai Haruki4669bc92008-12-17 16:51:04 -08002673 struct sk_buff *skb;
2674 int skb_dirtytx;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002675 int tx_ring_size = tx_queue->tx_ring_size;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002676 int frags = 0, nr_txbds = 0;
Dai Haruki4669bc92008-12-17 16:51:04 -08002677 int i;
Dai Harukid080cd62008-04-09 19:37:51 -05002678 int howmany = 0;
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002679 int tqi = tx_queue->qindex;
2680 unsigned int bytes_sent = 0;
Dai Haruki4669bc92008-12-17 16:51:04 -08002681 u32 lstatus;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002682 size_t buflen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002683
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002684 txq = netdev_get_tx_queue(dev, tqi);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002685 bdp = tx_queue->dirty_tx;
2686 skb_dirtytx = tx_queue->skb_dirtytx;
Dai Haruki4669bc92008-12-17 16:51:04 -08002687
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002688 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
Anton Vorontsova3bc1f12009-11-10 14:11:10 +00002689
Dai Haruki4669bc92008-12-17 16:51:04 -08002690 frags = skb_shinfo(skb)->nr_frags;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002691
Jan Ceuleers0977f812012-06-05 03:42:12 +00002692 /* When time stamping, one additional TxBD must be freed.
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002693 * Also, we need to dma_unmap_single() the TxPAL.
2694 */
Oliver Hartkopp2244d072010-08-17 08:59:14 +00002695 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002696 nr_txbds = frags + 2;
2697 else
2698 nr_txbds = frags + 1;
2699
2700 lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
Dai Haruki4669bc92008-12-17 16:51:04 -08002701
Claudiu Manoila7312d52015-03-13 10:36:28 +02002702 lstatus = be32_to_cpu(lbdp->lstatus);
Dai Haruki4669bc92008-12-17 16:51:04 -08002703
2704 /* Only clean completed frames */
2705 if ((lstatus & BD_LFLAG(TXBD_READY)) &&
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002706 (lstatus & BD_LENGTH_MASK))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002707 break;
2708
Oliver Hartkopp2244d072010-08-17 08:59:14 +00002709 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002710 next = next_txbd(bdp, base, tx_ring_size);
Claudiu Manoila7312d52015-03-13 10:36:28 +02002711 buflen = be16_to_cpu(next->length) +
2712 GMAC_FCB_LEN + GMAC_TXPAL_LEN;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002713 } else
Claudiu Manoila7312d52015-03-13 10:36:28 +02002714 buflen = be16_to_cpu(bdp->length);
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002715
Claudiu Manoila7312d52015-03-13 10:36:28 +02002716 dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002717 buflen, DMA_TO_DEVICE);
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002718
Oliver Hartkopp2244d072010-08-17 08:59:14 +00002719 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002720 struct skb_shared_hwtstamps shhwtstamps;
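			/* Editorial note: the hardware-written 64-bit Tx
			 * timestamp lives in the TxPAL pad pushed in front
			 * of the frame; rounding skb->data + 0x10 down to
			 * an 8-byte boundary locates it.
			 */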
Scott Woodb4b67f22015-07-29 16:13:06 +03002721 u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
2722 ~0x7UL);
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002723
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002724 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
Yangbo Luf54af122016-02-24 17:26:56 +08002725 shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
Manfred Rudigier9c4886e2012-01-09 23:26:51 +00002726 skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002727 skb_tstamp_tx(skb, &shhwtstamps);
Claudiu Manoila7312d52015-03-13 10:36:28 +02002728 gfar_clear_txbd_status(bdp);
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002729 bdp = next;
2730 }
Dai Haruki4669bc92008-12-17 16:51:04 -08002731
Claudiu Manoila7312d52015-03-13 10:36:28 +02002732 gfar_clear_txbd_status(bdp);
Dai Haruki4669bc92008-12-17 16:51:04 -08002733 bdp = next_txbd(bdp, base, tx_ring_size);
2734
2735 for (i = 0; i < frags; i++) {
Claudiu Manoila7312d52015-03-13 10:36:28 +02002736 dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr),
2737 be16_to_cpu(bdp->length),
2738 DMA_TO_DEVICE);
2739 gfar_clear_txbd_status(bdp);
Dai Haruki4669bc92008-12-17 16:51:04 -08002740 bdp = next_txbd(bdp, base, tx_ring_size);
2741 }
2742
Claudiu Manoil50ad0762013-08-30 15:01:15 +03002743 bytes_sent += GFAR_CB(skb)->bytes_sent;
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002744
Eric Dumazetacb600d2012-10-05 06:23:55 +00002745 dev_kfree_skb_any(skb);
Andy Fleming0fd56bb2009-02-04 16:43:16 -08002746
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002747 tx_queue->tx_skbuff[skb_dirtytx] = NULL;
Dai Haruki4669bc92008-12-17 16:51:04 -08002748
2749 skb_dirtytx = (skb_dirtytx + 1) &
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002750 TX_RING_MOD_MASK(tx_ring_size);
Dai Haruki4669bc92008-12-17 16:51:04 -08002751
Dai Harukid080cd62008-04-09 19:37:51 -05002752 howmany++;
Claudiu Manoilbc602282015-05-06 18:07:29 +03002753 spin_lock(&tx_queue->txlock);
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002754 tx_queue->num_txbdfree += nr_txbds;
Claudiu Manoilbc602282015-05-06 18:07:29 +03002755 spin_unlock(&tx_queue->txlock);
Dai Haruki4669bc92008-12-17 16:51:04 -08002756 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002757
Dai Haruki4669bc92008-12-17 16:51:04 -08002758 /* If we freed a buffer, we can restart transmission, if necessary */
Claudiu Manoil08511332014-02-24 12:13:45 +02002759 if (tx_queue->num_txbdfree &&
2760 netif_tx_queue_stopped(txq) &&
2761 !(test_bit(GFAR_DOWN, &priv->state)))
2762 netif_wake_subqueue(priv->ndev, tqi);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002763
Dai Haruki4669bc92008-12-17 16:51:04 -08002764 /* Update dirty indicators */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002765 tx_queue->skb_dirtytx = skb_dirtytx;
2766 tx_queue->dirty_tx = bdp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002767
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002768 netdev_tx_completed_queue(txq, howmany, bytes_sent);
Dai Harukid080cd62008-04-09 19:37:51 -05002769}
2770
Claudiu Manoil75354142015-07-13 16:22:06 +03002771static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
Eran Libertyacbc0f02010-07-07 15:54:54 -07002772{
Claudiu Manoil75354142015-07-13 16:22:06 +03002773 struct page *page;
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002774 dma_addr_t addr;
Eran Libertyacbc0f02010-07-07 15:54:54 -07002775
Claudiu Manoil75354142015-07-13 16:22:06 +03002776 page = dev_alloc_page();
2777 if (unlikely(!page))
2778 return false;
Eran Libertyacbc0f02010-07-07 15:54:54 -07002779
Claudiu Manoil75354142015-07-13 16:22:06 +03002780 addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
2781 if (unlikely(dma_mapping_error(rxq->dev, addr))) {
2782 __free_page(page);
Eran Libertyacbc0f02010-07-07 15:54:54 -07002783
Claudiu Manoil75354142015-07-13 16:22:06 +03002784 return false;
Kevin Hao0a4b5a22014-12-11 14:08:41 +08002785 }
2786
Claudiu Manoil75354142015-07-13 16:22:06 +03002787 rxb->dma = addr;
2788 rxb->page = page;
2789 rxb->page_offset = 0;
2790
2791 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002792}
2793
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002794static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
2795{
Claudiu Manoilf23223f2015-07-13 16:22:05 +03002796 struct gfar_private *priv = netdev_priv(rx_queue->ndev);
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002797 struct gfar_extra_stats *estats = &priv->extra_stats;
2798
Claudiu Manoilf23223f2015-07-13 16:22:05 +03002799 netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002800 atomic64_inc(&estats->rx_alloc_err);
2801}
2802
2803static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
2804 int alloc_cnt)
2805{
Claudiu Manoil75354142015-07-13 16:22:06 +03002806 struct rxbd8 *bdp;
2807 struct gfar_rx_buff *rxb;
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002808 int i;
2809
2810 i = rx_queue->next_to_use;
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002811 bdp = &rx_queue->rx_bd_base[i];
Claudiu Manoil75354142015-07-13 16:22:06 +03002812 rxb = &rx_queue->rx_buff[i];
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002813
2814 while (alloc_cnt--) {
Claudiu Manoil75354142015-07-13 16:22:06 +03002815		/* try to reuse the page */
2816 if (unlikely(!rxb->page)) {
2817 if (unlikely(!gfar_new_page(rx_queue, rxb))) {
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002818 gfar_rx_alloc_err(rx_queue);
2819 break;
2820 }
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002821 }
2822
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002823 /* Setup the new RxBD */
Claudiu Manoil75354142015-07-13 16:22:06 +03002824 gfar_init_rxbdp(rx_queue, bdp,
2825 rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002826
2827 /* Update to the next pointer */
Claudiu Manoil75354142015-07-13 16:22:06 +03002828 bdp++;
2829 rxb++;
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002830
Claudiu Manoil75354142015-07-13 16:22:06 +03002831 if (unlikely(++i == rx_queue->rx_ring_size)) {
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002832 i = 0;
Claudiu Manoil75354142015-07-13 16:22:06 +03002833 bdp = rx_queue->rx_bd_base;
2834 rxb = rx_queue->rx_buff;
2835 }
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002836 }
2837
2838 rx_queue->next_to_use = i;
Claudiu Manoil75354142015-07-13 16:22:06 +03002839 rx_queue->next_to_alloc = i;
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002840}
2841
Claudiu Manoilf23223f2015-07-13 16:22:05 +03002842static void count_errors(u32 lstatus, struct net_device *ndev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002843{
Claudiu Manoilf23223f2015-07-13 16:22:05 +03002844 struct gfar_private *priv = netdev_priv(ndev);
2845 struct net_device_stats *stats = &ndev->stats;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002846 struct gfar_extra_stats *estats = &priv->extra_stats;
2847
Jan Ceuleers0977f812012-06-05 03:42:12 +00002848 /* If the packet was truncated, none of the other errors matter */
Claudiu Manoilf9660822015-07-13 16:22:04 +03002849 if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002850 stats->rx_length_errors++;
2851
Paul Gortmaker212079d2013-02-12 15:38:19 -05002852 atomic64_inc(&estats->rx_trunc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002853
2854 return;
2855 }
2856 /* Count the errors, if there were any */
Claudiu Manoilf9660822015-07-13 16:22:04 +03002857 if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002858 stats->rx_length_errors++;
2859
Claudiu Manoilf9660822015-07-13 16:22:04 +03002860 if (lstatus & BD_LFLAG(RXBD_LARGE))
Paul Gortmaker212079d2013-02-12 15:38:19 -05002861 atomic64_inc(&estats->rx_large);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002862 else
Paul Gortmaker212079d2013-02-12 15:38:19 -05002863 atomic64_inc(&estats->rx_short);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002864 }
Claudiu Manoilf9660822015-07-13 16:22:04 +03002865 if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002866 stats->rx_frame_errors++;
Paul Gortmaker212079d2013-02-12 15:38:19 -05002867 atomic64_inc(&estats->rx_nonoctet);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002868 }
Claudiu Manoilf9660822015-07-13 16:22:04 +03002869 if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
Paul Gortmaker212079d2013-02-12 15:38:19 -05002870 atomic64_inc(&estats->rx_crcerr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002871 stats->rx_crc_errors++;
2872 }
Claudiu Manoilf9660822015-07-13 16:22:04 +03002873 if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
Paul Gortmaker212079d2013-02-12 15:38:19 -05002874 atomic64_inc(&estats->rx_overrun);
Claudiu Manoilf9660822015-07-13 16:22:04 +03002875 stats->rx_over_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002876 }
2877}
2878
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002879irqreturn_t gfar_receive(int irq, void *grp_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002880{
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02002881 struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2882 unsigned long flags;
Claudiu Manoil3e905b82015-10-05 17:19:59 +03002883 u32 imask, ievent;
2884
2885 ievent = gfar_read(&grp->regs->ievent);
2886
2887 if (unlikely(ievent & IEVENT_FGPI)) {
2888 gfar_write(&grp->regs->ievent, IEVENT_FGPI);
2889 return IRQ_HANDLED;
2890 }
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02002891
2892 if (likely(napi_schedule_prep(&grp->napi_rx))) {
2893 spin_lock_irqsave(&grp->grplock, flags);
2894 imask = gfar_read(&grp->regs->imask);
2895 imask &= IMASK_RX_DISABLED;
2896 gfar_write(&grp->regs->imask, imask);
2897 spin_unlock_irqrestore(&grp->grplock, flags);
2898 __napi_schedule(&grp->napi_rx);
2899 } else {
	2900		/* Clear IEVENT so the interrupt is not raised again
	2901		 * for the packets that have already arrived.
2902 */
2903 gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
2904 }
2905
2906 return IRQ_HANDLED;
2907}
2908
2909/* Interrupt Handler for Transmit complete */
2910static irqreturn_t gfar_transmit(int irq, void *grp_id)
2911{
2912 struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2913 unsigned long flags;
2914 u32 imask;
2915
2916 if (likely(napi_schedule_prep(&grp->napi_tx))) {
2917 spin_lock_irqsave(&grp->grplock, flags);
2918 imask = gfar_read(&grp->regs->imask);
2919 imask &= IMASK_TX_DISABLED;
2920 gfar_write(&grp->regs->imask, imask);
2921 spin_unlock_irqrestore(&grp->grplock, flags);
2922 __napi_schedule(&grp->napi_tx);
2923 } else {
	2924		/* Clear IEVENT so the interrupt is not raised again
	2925		 * for the packets that have already arrived.
2926 */
2927 gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
2928 }
2929
Linus Torvalds1da177e2005-04-16 15:20:36 -07002930 return IRQ_HANDLED;
2931}
2932
Claudiu Manoil75354142015-07-13 16:22:06 +03002933static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
2934 struct sk_buff *skb, bool first)
2935{
Andy Spencer202a0a702018-01-25 19:37:50 -08002936 int size = lstatus & BD_LENGTH_MASK;
Claudiu Manoil75354142015-07-13 16:22:06 +03002937 struct page *page = rxb->page;
Claudiu Manoil75354142015-07-13 16:22:06 +03002938
Zefir Kurtisi6c389fc2016-08-22 15:58:12 +02002939 if (likely(first)) {
Claudiu Manoil75354142015-07-13 16:22:06 +03002940 skb_put(skb, size);
Zefir Kurtisi6c389fc2016-08-22 15:58:12 +02002941 } else {
2942 /* the last fragments' length contains the full frame length */
Andy Spencerd903ec72018-02-22 11:05:33 -08002943 if (lstatus & BD_LFLAG(RXBD_LAST))
Zefir Kurtisi6c389fc2016-08-22 15:58:12 +02002944 size -= skb->len;
2945
Andy Spencerd903ec72018-02-22 11:05:33 -08002946 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
2947 rxb->page_offset + RXBUF_ALIGNMENT,
2948 size, GFAR_RXB_TRUESIZE);
Zefir Kurtisi6c389fc2016-08-22 15:58:12 +02002949 }
Claudiu Manoil75354142015-07-13 16:22:06 +03002950
	2951	/* try to reuse the page */
Eric Dumazet69fed992017-01-18 19:44:42 -08002952 if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
Claudiu Manoil75354142015-07-13 16:22:06 +03002953 return false;
2954
2955 /* change offset to the other half */
2956 rxb->page_offset ^= GFAR_RXB_TRUESIZE;
2957
Joonsoo Kimfe896d12016-03-17 14:19:26 -07002958 page_ref_inc(page);
Claudiu Manoil75354142015-07-13 16:22:06 +03002959
2960 return true;
2961}
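/* Editorial note on the recycling scheme above, assuming 4K pages:
 * each page is split into two GFAR_RXB_TRUESIZE halves. Once a half
 * has been attached to an skb, the offset flips to the other half and
 * the page reference count is bumped:
 *
 *	rxb->page_offset: 0 -> 2048 -> 0 -> ...
 *
 * Recycling stops as soon as someone else still holds a reference
 * (page_count() != 1) or the page came from a pfmemalloc reserve.
 */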
2962
2963static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
2964 struct gfar_rx_buff *old_rxb)
2965{
2966 struct gfar_rx_buff *new_rxb;
2967 u16 nta = rxq->next_to_alloc;
2968
2969 new_rxb = &rxq->rx_buff[nta];
2970
2971 /* find next buf that can reuse a page */
2972 nta++;
2973 rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;
2974
2975 /* copy page reference */
2976 *new_rxb = *old_rxb;
2977
2978 /* sync for use by the device */
2979 dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
2980 old_rxb->page_offset,
2981 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
2982}
2983
2984static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
2985 u32 lstatus, struct sk_buff *skb)
2986{
2987 struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
2988 struct page *page = rxb->page;
2989 bool first = false;
2990
2991 if (likely(!skb)) {
2992 void *buff_addr = page_address(page) + rxb->page_offset;
2993
2994 skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
2995 if (unlikely(!skb)) {
2996 gfar_rx_alloc_err(rx_queue);
2997 return NULL;
2998 }
2999 skb_reserve(skb, RXBUF_ALIGNMENT);
3000 first = true;
3001 }
3002
3003 dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
3004 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
3005
3006 if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
3007 /* reuse the free half of the page */
3008 gfar_reuse_rx_page(rx_queue, rxb);
3009 } else {
3010 /* page cannot be reused, unmap it */
3011 dma_unmap_page(rx_queue->dev, rxb->dma,
3012 PAGE_SIZE, DMA_FROM_DEVICE);
3013 }
3014
3015 /* clear rxb content */
3016 rxb->page = NULL;
3017
3018 return skb;
3019}
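/* Editorial note: build_skb() wraps the page half without copying;
 * RXBUF_ALIGNMENT bytes are reserved up front because the refill path
 * programs bufPtr + RXBUF_ALIGNMENT into each RxBD, so skb->data ends
 * up exactly where the controller started writing the frame.
 */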
3020
Kumar Gala0bbaf062005-06-20 10:54:21 -05003021static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
3022{
3023 /* If valid headers were found, and valid sums
3024 * were verified, then we tell the kernel that no
Jan Ceuleers0977f812012-06-05 03:42:12 +00003025 * checksumming is necessary. Otherwise, leave it CHECKSUM_NONE so the stack verifies it.
3026 */
Claudiu Manoil26eb9372015-03-13 10:36:29 +02003027 if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
3028 (RXFCB_CIP | RXFCB_CTU))
Kumar Gala0bbaf062005-06-20 10:54:21 -05003029 skb->ip_summed = CHECKSUM_UNNECESSARY;
3030 else
Eric Dumazetbc8acf22010-09-02 13:07:41 -07003031 skb_checksum_none_assert(skb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05003032}
3033
Jan Ceuleers0977f812012-06-05 03:42:12 +00003034/* gfar_process_frame() -- strip FCB/padding/FCS and set up csum, tstamp, VLAN */
Claudiu Manoilf23223f2015-07-13 16:22:05 +03003035static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003036{
Claudiu Manoilf23223f2015-07-13 16:22:05 +03003037 struct gfar_private *priv = netdev_priv(ndev);
Kumar Gala0bbaf062005-06-20 10:54:21 -05003038 struct rxfcb *fcb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003039
Dai Haruki2c2db482008-12-16 15:31:15 -08003040 /* fcb is at the beginning if exists */
3041 fcb = (struct rxfcb *)skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003042
Jan Ceuleers0977f812012-06-05 03:42:12 +00003043 /* Remove the FCB from the skb
3044 * Remove the padded bytes, if there are any
3045 */
Claudiu Manoilf23223f2015-07-13 16:22:05 +03003046 if (priv->uses_rxfcb)
Claudiu Manoil76f31e82015-07-13 16:22:03 +03003047 skb_pull(skb, GMAC_FCB_LEN);
Kumar Gala0bbaf062005-06-20 10:54:21 -05003048
Manfred Rudigiercc772ab2010-04-08 23:10:03 +00003049 /* Get receive timestamp from the skb */
3050 if (priv->hwts_rx_en) {
3051 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
3052 u64 *ns = (u64 *) skb->data;
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00003053
Manfred Rudigiercc772ab2010-04-08 23:10:03 +00003054 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
Yangbo Luf54af122016-02-24 17:26:56 +08003055 shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
Manfred Rudigiercc772ab2010-04-08 23:10:03 +00003056 }
3057
3058 if (priv->padding)
3059 skb_pull(skb, priv->padding);
3060
Andy Spencerd903ec72018-02-22 11:05:33 -08003061 /* Trim off the FCS */
3062 pskb_trim(skb, skb->len - ETH_FCS_LEN);
3063
Claudiu Manoilf23223f2015-07-13 16:22:05 +03003064 if (ndev->features & NETIF_F_RXCSUM)
Dai Haruki2c2db482008-12-16 15:31:15 -08003065 gfar_rx_checksum(skb, fcb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05003066
Patrick McHardyf6469682013-04-19 02:04:27 +00003067	/* We do need to check NETIF_F_HW_VLAN_CTAG_RX here.
David S. Miller823dcd22011-08-20 10:39:12 -07003068 * Even if vlan rx accel is disabled, on some chips
3069 * RXFCB_VLN is pseudo randomly set.
3070 */
Claudiu Manoilf23223f2015-07-13 16:22:05 +03003071 if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
Claudiu Manoil26eb9372015-03-13 10:36:29 +02003072 be16_to_cpu(fcb->flags) & RXFCB_VLN)
3073 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3074 be16_to_cpu(fcb->vlctl));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003075}
3076
3077/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
Jan Ceuleers2281a0f2012-06-05 03:42:11 +00003078 * until the budget/quota has been reached. Returns the number
3079 * of frames handled
Linus Torvalds1da177e2005-04-16 15:20:36 -07003080 */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00003081int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003082{
Claudiu Manoilf23223f2015-07-13 16:22:05 +03003083 struct net_device *ndev = rx_queue->ndev;
Claudiu Manoilf23223f2015-07-13 16:22:05 +03003084 struct gfar_private *priv = netdev_priv(ndev);
Claudiu Manoil75354142015-07-13 16:22:06 +03003085 struct rxbd8 *bdp;
3086 int i, howmany = 0;
3087 struct sk_buff *skb = rx_queue->skb;
3088 int cleaned_cnt = gfar_rxbd_unused(rx_queue);
3089 unsigned int total_bytes = 0, total_pkts = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003090
3091 /* Get the first full descriptor */
Claudiu Manoil76f31e82015-07-13 16:22:03 +03003092 i = rx_queue->next_to_clean;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003093
Claudiu Manoil76f31e82015-07-13 16:22:03 +03003094 while (rx_work_limit--) {
Claudiu Manoilf9660822015-07-13 16:22:04 +03003095 u32 lstatus;
Dai Haruki2c2db482008-12-16 15:31:15 -08003096
Claudiu Manoil76f31e82015-07-13 16:22:03 +03003097 if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
3098 gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
3099 cleaned_cnt = 0;
3100 }
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00003101
Claudiu Manoil76f31e82015-07-13 16:22:03 +03003102 bdp = &rx_queue->rx_bd_base[i];
Claudiu Manoilf9660822015-07-13 16:22:04 +03003103 lstatus = be32_to_cpu(bdp->lstatus);
3104 if (lstatus & BD_LFLAG(RXBD_EMPTY))
Claudiu Manoil76f31e82015-07-13 16:22:03 +03003105 break;
3106
3107 /* order rx buffer descriptor reads */
Scott Wood3b6330c2007-05-16 15:06:59 -05003108 rmb();
Andy Fleming815b97c2008-04-22 17:18:29 -05003109
Claudiu Manoil76f31e82015-07-13 16:22:03 +03003110 /* fetch next to clean buffer from the ring */
Claudiu Manoil75354142015-07-13 16:22:06 +03003111 skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
3112 if (unlikely(!skb))
3113 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003114
Claudiu Manoil75354142015-07-13 16:22:06 +03003115 cleaned_cnt++;
3116 howmany++;
Andy Fleming81183052008-11-12 10:07:11 -06003117
Claudiu Manoil75354142015-07-13 16:22:06 +03003118 if (unlikely(++i == rx_queue->rx_ring_size))
3119 i = 0;
Anton Vorontsov63b88b92010-06-11 10:51:03 +00003120
Claudiu Manoil75354142015-07-13 16:22:06 +03003121 rx_queue->next_to_clean = i;
3122
3123 /* fetch next buffer if not the last in frame */
3124 if (!(lstatus & BD_LFLAG(RXBD_LAST)))
3125 continue;
3126
3127 if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
Claudiu Manoilf23223f2015-07-13 16:22:05 +03003128 count_errors(lstatus, ndev);
Andy Fleming815b97c2008-04-22 17:18:29 -05003129
Claudiu Manoil76f31e82015-07-13 16:22:03 +03003130 /* discard faulty buffer */
3131 dev_kfree_skb(skb);
Claudiu Manoil75354142015-07-13 16:22:06 +03003132 skb = NULL;
3133 rx_queue->stats.rx_dropped++;
3134 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003135 }
3136
Claudiu Manoil590399d2018-02-27 17:33:10 +02003137 gfar_process_frame(ndev, skb);
3138
Claudiu Manoil75354142015-07-13 16:22:06 +03003139 /* Increment the number of packets */
3140 total_pkts++;
3141 total_bytes += skb->len;
3142
3143 skb_record_rx_queue(skb, rx_queue->qindex);
3144
Claudiu Manoil590399d2018-02-27 17:33:10 +02003145 skb->protocol = eth_type_trans(skb, ndev);
Claudiu Manoil75354142015-07-13 16:22:06 +03003146
3147 /* Send the packet up the stack */
3148 napi_gro_receive(&rx_queue->grp->napi_rx, skb);
3149
3150 skb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003151 }
3152
Claudiu Manoil75354142015-07-13 16:22:06 +03003153 /* Store incomplete frames for completion */
3154 rx_queue->skb = skb;
3155
3156 rx_queue->stats.rx_packets += total_pkts;
3157 rx_queue->stats.rx_bytes += total_bytes;
Claudiu Manoil76f31e82015-07-13 16:22:03 +03003158
3159 if (cleaned_cnt)
3160 gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
3161
3162 /* Update Last Free RxBD pointer for LFC */
3163 if (unlikely(priv->tx_actual_en)) {
Scott Woodb4b67f22015-07-29 16:13:06 +03003164 u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
3165
3166 gfar_write(rx_queue->rfbptr, bdp_dma);
Claudiu Manoil76f31e82015-07-13 16:22:03 +03003167 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003168
Linus Torvalds1da177e2005-04-16 15:20:36 -07003169 return howmany;
3170}
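/* Editorial note: RX buffers are recycled in batches -- the loop above
 * refills once GFAR_RX_BUFF_ALLOC descriptors have been consumed and
 * once more on exit -- so the ring rarely runs dry. The final rfbptr
 * write publishes the last free RxBD for lossless flow control, letting
 * the controller throttle reception before it overruns the ring.
 */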
3171
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02003172static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
Claudiu Manoil5eaedf32013-06-10 20:19:48 +03003173{
3174 struct gfar_priv_grp *gfargrp =
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02003175 container_of(napi, struct gfar_priv_grp, napi_rx);
Claudiu Manoil5eaedf32013-06-10 20:19:48 +03003176 struct gfar __iomem *regs = gfargrp->regs;
Claudiu Manoil71ff9e32014-03-07 14:42:46 +02003177 struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
Claudiu Manoil5eaedf32013-06-10 20:19:48 +03003178 int work_done = 0;
3179
	3180	/* Clear IEVENT so the interrupt is not raised again
	3181	 * for the packets that have already arrived
3182 */
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02003183 gfar_write(&regs->ievent, IEVENT_RX_MASK);
Claudiu Manoil5eaedf32013-06-10 20:19:48 +03003184
3185 work_done = gfar_clean_rx_ring(rx_queue, budget);
3186
3187 if (work_done < budget) {
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02003188 u32 imask;
Eric Dumazet6ad20162017-01-30 08:22:01 -08003189 napi_complete_done(napi, work_done);
Claudiu Manoil5eaedf32013-06-10 20:19:48 +03003190 /* Clear the halt bit in RSTAT */
3191 gfar_write(&regs->rstat, gfargrp->rstat);
3192
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02003193 spin_lock_irq(&gfargrp->grplock);
3194 imask = gfar_read(&regs->imask);
3195 imask |= IMASK_RX_DEFAULT;
3196 gfar_write(&regs->imask, imask);
3197 spin_unlock_irq(&gfargrp->grplock);
Claudiu Manoil5eaedf32013-06-10 20:19:48 +03003198 }
3199
3200 return work_done;
3201}
3202
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02003203static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003204{
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00003205 struct gfar_priv_grp *gfargrp =
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02003206 container_of(napi, struct gfar_priv_grp, napi_tx);
3207 struct gfar __iomem *regs = gfargrp->regs;
Claudiu Manoil71ff9e32014-03-07 14:42:46 +02003208 struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02003209 u32 imask;
3210
	3211	/* Clear IEVENT so the interrupt is not raised again
	3212	 * for the packets that have already arrived
3213 */
3214 gfar_write(&regs->ievent, IEVENT_TX_MASK);
3215
3216 /* run Tx cleanup to completion */
3217 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
3218 gfar_clean_tx_ring(tx_queue);
3219
3220 napi_complete(napi);
3221
3222 spin_lock_irq(&gfargrp->grplock);
3223 imask = gfar_read(&regs->imask);
3224 imask |= IMASK_TX_DEFAULT;
3225 gfar_write(&regs->imask, imask);
3226 spin_unlock_irq(&gfargrp->grplock);
3227
3228 return 0;
3229}
3230
3231static int gfar_poll_rx(struct napi_struct *napi, int budget)
3232{
3233 struct gfar_priv_grp *gfargrp =
3234 container_of(napi, struct gfar_priv_grp, napi_rx);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00003235 struct gfar_private *priv = gfargrp->priv;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00003236 struct gfar __iomem *regs = gfargrp->regs;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00003237 struct gfar_priv_rx_q *rx_queue = NULL;
Claudiu Manoilc233cf402013-03-19 07:40:02 +00003238 int work_done = 0, work_done_per_q = 0;
Claudiu Manoil39c0a0d2013-03-21 03:12:13 +00003239 int i, budget_per_q = 0;
Claudiu Manoil6be5ed32013-03-19 07:40:03 +00003240 unsigned long rstat_rxf;
3241 int num_act_queues;
Dai Harukid080cd62008-04-09 19:37:51 -05003242
Dai Haruki8c7396a2008-12-17 16:52:00 -08003243	/* Clear IEVENT so the interrupt is not raised again
Jan Ceuleers0977f812012-06-05 03:42:12 +00003244	 * for the packets that have already arrived
3245 */
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02003246 gfar_write(&regs->ievent, IEVENT_RX_MASK);
Dai Haruki8c7396a2008-12-17 16:52:00 -08003247
Claudiu Manoil6be5ed32013-03-19 07:40:03 +00003248 rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
3249
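	/* Split the NAPI budget evenly across the queues that currently
	 * have frames pending: e.g. budget 64 with two active queues
	 * gives each ring 32 descriptors for this poll (integer
	 * division, so a small remainder may go unused).
	 */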
3250 num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
3251 if (num_act_queues)
3252 budget_per_q = budget/num_act_queues;
3253
Claudiu Manoil3ba405d2013-10-14 17:05:09 +03003254 for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
3255 /* skip queue if not active */
3256 if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
3257 continue;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00003258
Claudiu Manoil3ba405d2013-10-14 17:05:09 +03003259 rx_queue = priv->rx_queue[i];
3260 work_done_per_q =
3261 gfar_clean_rx_ring(rx_queue, budget_per_q);
3262 work_done += work_done_per_q;
Claudiu Manoilc233cf402013-03-19 07:40:02 +00003263
Claudiu Manoil3ba405d2013-10-14 17:05:09 +03003264 /* finished processing this queue */
3265 if (work_done_per_q < budget_per_q) {
3266 /* clear active queue hw indication */
3267 gfar_write(&regs->rstat,
3268 RSTAT_CLEAR_RXF0 >> i);
3269 num_act_queues--;
Claudiu Manoil6be5ed32013-03-19 07:40:03 +00003270
Claudiu Manoil3ba405d2013-10-14 17:05:09 +03003271 if (!num_act_queues)
3272 break;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00003273 }
Claudiu Manoil3ba405d2013-10-14 17:05:09 +03003274 }
Claudiu Manoilc233cf402013-03-19 07:40:02 +00003275
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02003276 if (!num_act_queues) {
3277 u32 imask;
Eric Dumazet6ad20162017-01-30 08:22:01 -08003278 napi_complete_done(napi, work_done);
Claudiu Manoilc233cf402013-03-19 07:40:02 +00003279
Claudiu Manoil3ba405d2013-10-14 17:05:09 +03003280 /* Clear the halt bit in RSTAT */
3281 gfar_write(&regs->rstat, gfargrp->rstat);
Claudiu Manoilc233cf402013-03-19 07:40:02 +00003282
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02003283 spin_lock_irq(&gfargrp->grplock);
3284 imask = gfar_read(&regs->imask);
3285 imask |= IMASK_RX_DEFAULT;
3286 gfar_write(&regs->imask, imask);
3287 spin_unlock_irq(&gfargrp->grplock);
Dai Harukid080cd62008-04-09 19:37:51 -05003288 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003289
Claudiu Manoilc233cf402013-03-19 07:40:02 +00003290 return work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003291}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003292
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02003293static int gfar_poll_tx(struct napi_struct *napi, int budget)
3294{
3295 struct gfar_priv_grp *gfargrp =
3296 container_of(napi, struct gfar_priv_grp, napi_tx);
3297 struct gfar_private *priv = gfargrp->priv;
3298 struct gfar __iomem *regs = gfargrp->regs;
3299 struct gfar_priv_tx_q *tx_queue = NULL;
3300 int has_tx_work = 0;
3301 int i;
3302
	3303	/* Clear IEVENT so the interrupt is not raised again
	3304	 * for the packets that have already arrived
3305 */
3306 gfar_write(&regs->ievent, IEVENT_TX_MASK);
3307
3308 for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
3309 tx_queue = priv->tx_queue[i];
3310 /* run Tx cleanup to completion */
3311 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
3312 gfar_clean_tx_ring(tx_queue);
3313 has_tx_work = 1;
3314 }
3315 }
3316
3317 if (!has_tx_work) {
3318 u32 imask;
3319 napi_complete(napi);
3320
3321 spin_lock_irq(&gfargrp->grplock);
3322 imask = gfar_read(&regs->imask);
3323 imask |= IMASK_TX_DEFAULT;
3324 gfar_write(&regs->imask, imask);
3325 spin_unlock_irq(&gfargrp->grplock);
3326 }
3327
3328 return 0;
3329}
3330
3331
Vitaly Woolf2d71c22006-11-07 13:27:02 +03003332#ifdef CONFIG_NET_POLL_CONTROLLER
Jan Ceuleers0977f812012-06-05 03:42:12 +00003333/* Polling 'interrupt' - used by things like netconsole to send skbs
Vitaly Woolf2d71c22006-11-07 13:27:02 +03003334 * without having to re-enable interrupts. It's not called while
3335 * the interrupt routine is executing.
3336 */
3337static void gfar_netpoll(struct net_device *dev)
3338{
3339 struct gfar_private *priv = netdev_priv(dev);
Jan Ceuleers3a2e16c2012-06-05 03:42:14 +00003340 int i;
Vitaly Woolf2d71c22006-11-07 13:27:02 +03003341
3342 /* If the device has multiple interrupts, run tx/rx */
Andy Flemingb31a1d82008-12-16 15:29:15 -08003343 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00003344 for (i = 0; i < priv->num_grps; i++) {
Paul Gortmaker62ed8392013-02-24 05:38:31 +00003345 struct gfar_priv_grp *grp = &priv->gfargrp[i];
3346
3347 disable_irq(gfar_irq(grp, TX)->irq);
3348 disable_irq(gfar_irq(grp, RX)->irq);
3349 disable_irq(gfar_irq(grp, ER)->irq);
3350 gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
3351 enable_irq(gfar_irq(grp, ER)->irq);
3352 enable_irq(gfar_irq(grp, RX)->irq);
3353 enable_irq(gfar_irq(grp, TX)->irq);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00003354 }
Vitaly Woolf2d71c22006-11-07 13:27:02 +03003355 } else {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00003356 for (i = 0; i < priv->num_grps; i++) {
Paul Gortmaker62ed8392013-02-24 05:38:31 +00003357 struct gfar_priv_grp *grp = &priv->gfargrp[i];
3358
3359 disable_irq(gfar_irq(grp, TX)->irq);
3360 gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
3361 enable_irq(gfar_irq(grp, TX)->irq);
Anton Vorontsov43de0042009-12-09 02:52:19 -08003362 }
Vitaly Woolf2d71c22006-11-07 13:27:02 +03003363 }
3364}
3365#endif
3366
Linus Torvalds1da177e2005-04-16 15:20:36 -07003367/* The interrupt handler for devices with one interrupt */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003368static irqreturn_t gfar_interrupt(int irq, void *grp_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003369{
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003370 struct gfar_priv_grp *gfargrp = grp_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003371
3372 /* Save ievent for future reference */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003373 u32 events = gfar_read(&gfargrp->regs->ievent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003374
Linus Torvalds1da177e2005-04-16 15:20:36 -07003375 /* Check for reception */
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04003376 if (events & IEVENT_RX_MASK)
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003377 gfar_receive(irq, grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003378
3379 /* Check for transmit completion */
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04003380 if (events & IEVENT_TX_MASK)
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003381 gfar_transmit(irq, grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003382
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04003383 /* Check for errors */
3384 if (events & IEVENT_ERR_MASK)
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003385 gfar_error(irq, grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003386
3387 return IRQ_HANDLED;
3388}
3389
Linus Torvalds1da177e2005-04-16 15:20:36 -07003390/* Called every time the controller might need to be made
3391 * aware of new link state. The PHY code conveys this
Andy Flemingbb40dcb2005-09-23 22:54:21 -04003392 * information through variables in the phydev structure, and this
Linus Torvalds1da177e2005-04-16 15:20:36 -07003393 * function converts those variables into the appropriate
3394 * register values, and can bring down the device if needed.
3395 */
3396static void adjust_link(struct net_device *dev)
3397{
3398 struct gfar_private *priv = netdev_priv(dev);
Philippe Reynes4c4a6b02016-05-16 01:30:08 +02003399 struct phy_device *phydev = dev->phydev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003400
Claudiu Manoil6ce29b02014-04-30 14:27:21 +03003401 if (unlikely(phydev->link != priv->oldlink ||
Guenter Roeck0ae93b22015-03-02 12:03:27 -08003402 (phydev->link && (phydev->duplex != priv->oldduplex ||
3403 phydev->speed != priv->oldspeed))))
Claudiu Manoil6ce29b02014-04-30 14:27:21 +03003404 gfar_update_link_state(priv);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04003405}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003406
3407/* Update the hash table based on the current list of multicast
3408 * addresses we subscribe to. Also, change the promiscuity of
3409 * the device based on the flags (this function is called
Jan Ceuleers0977f812012-06-05 03:42:12 +00003410 * whenever dev->flags is changed).
3411 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003412static void gfar_set_multi(struct net_device *dev)
3413{
Jiri Pirko22bedad32010-04-01 21:22:57 +00003414 struct netdev_hw_addr *ha;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003415 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00003416 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003417 u32 tempval;
3418
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00003419 if (dev->flags & IFF_PROMISC) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003420 /* Set RCTRL to PROM */
3421 tempval = gfar_read(&regs->rctrl);
3422 tempval |= RCTRL_PROM;
3423 gfar_write(&regs->rctrl, tempval);
3424 } else {
3425 /* Set RCTRL to not PROM */
3426 tempval = gfar_read(&regs->rctrl);
3427 tempval &= ~(RCTRL_PROM);
3428 gfar_write(&regs->rctrl, tempval);
3429 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003430
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00003431 if (dev->flags & IFF_ALLMULTI) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003432 /* Set the hash to rx all multicast frames */
Kumar Gala0bbaf062005-06-20 10:54:21 -05003433 gfar_write(&regs->igaddr0, 0xffffffff);
3434 gfar_write(&regs->igaddr1, 0xffffffff);
3435 gfar_write(&regs->igaddr2, 0xffffffff);
3436 gfar_write(&regs->igaddr3, 0xffffffff);
3437 gfar_write(&regs->igaddr4, 0xffffffff);
3438 gfar_write(&regs->igaddr5, 0xffffffff);
3439 gfar_write(&regs->igaddr6, 0xffffffff);
3440 gfar_write(&regs->igaddr7, 0xffffffff);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003441 gfar_write(&regs->gaddr0, 0xffffffff);
3442 gfar_write(&regs->gaddr1, 0xffffffff);
3443 gfar_write(&regs->gaddr2, 0xffffffff);
3444 gfar_write(&regs->gaddr3, 0xffffffff);
3445 gfar_write(&regs->gaddr4, 0xffffffff);
3446 gfar_write(&regs->gaddr5, 0xffffffff);
3447 gfar_write(&regs->gaddr6, 0xffffffff);
3448 gfar_write(&regs->gaddr7, 0xffffffff);
3449 } else {
Andy Fleming7f7f5312005-11-11 12:38:59 -06003450 int em_num;
3451 int idx;
3452
Linus Torvalds1da177e2005-04-16 15:20:36 -07003453 /* zero out the hash */
Kumar Gala0bbaf062005-06-20 10:54:21 -05003454 gfar_write(&regs->igaddr0, 0x0);
3455 gfar_write(&regs->igaddr1, 0x0);
3456 gfar_write(&regs->igaddr2, 0x0);
3457 gfar_write(&regs->igaddr3, 0x0);
3458 gfar_write(&regs->igaddr4, 0x0);
3459 gfar_write(&regs->igaddr5, 0x0);
3460 gfar_write(&regs->igaddr6, 0x0);
3461 gfar_write(&regs->igaddr7, 0x0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003462 gfar_write(&regs->gaddr0, 0x0);
3463 gfar_write(&regs->gaddr1, 0x0);
3464 gfar_write(&regs->gaddr2, 0x0);
3465 gfar_write(&regs->gaddr3, 0x0);
3466 gfar_write(&regs->gaddr4, 0x0);
3467 gfar_write(&regs->gaddr5, 0x0);
3468 gfar_write(&regs->gaddr6, 0x0);
3469 gfar_write(&regs->gaddr7, 0x0);
3470
Andy Fleming7f7f5312005-11-11 12:38:59 -06003471 /* If we have extended hash tables, we need to
3472 * clear the exact match registers to prepare for
Jan Ceuleers0977f812012-06-05 03:42:12 +00003473 * setting them
3474 */
Andy Fleming7f7f5312005-11-11 12:38:59 -06003475 if (priv->extended_hash) {
3476 em_num = GFAR_EM_NUM + 1;
3477 gfar_clear_exact_match(dev);
3478 idx = 1;
3479 } else {
3480 idx = 0;
3481 em_num = 0;
3482 }
3483
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003484 if (netdev_mc_empty(dev))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003485 return;
3486
3487 /* Parse the list, and set the appropriate bits */
Jiri Pirko22bedad32010-04-01 21:22:57 +00003488 netdev_for_each_mc_addr(ha, dev) {
Andy Fleming7f7f5312005-11-11 12:38:59 -06003489 if (idx < em_num) {
Jiri Pirko22bedad32010-04-01 21:22:57 +00003490 gfar_set_mac_for_addr(dev, idx, ha->addr);
Andy Fleming7f7f5312005-11-11 12:38:59 -06003491 idx++;
3492 } else
Jiri Pirko22bedad32010-04-01 21:22:57 +00003493 gfar_set_hash_for_addr(dev, ha->addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003494 }
3495 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003496}
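
/* How this path is exercised: when an application joins a multicast
 * group, the core maps the group to a MAC address, adds it to the
 * device's mc list, and invokes the driver's rx-mode hook (this
 * function, via ndo_set_rx_mode). A minimal userspace sketch, assuming
 * an IPv4 group on the default interface:
 */
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct ip_mreq mreq;

	memset(&mreq, 0, sizeof(mreq));
	/* 224.0.0.1 maps to MAC 01:00:5e:00:00:01, which lands in an
	 * exact-match register or the group hash table above */
	mreq.imr_multiaddr.s_addr = inet_addr("224.0.0.1");
	mreq.imr_interface.s_addr = htonl(INADDR_ANY);

	if (setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP,
		       &mreq, sizeof(mreq)) < 0)
		perror("IP_ADD_MEMBERSHIP");
	return 0;
}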
3497
Andy Fleming7f7f5312005-11-11 12:38:59 -06003498
3499/* Clears each of the exact match registers to zero, so they
Jan Ceuleers0977f812012-06-05 03:42:12 +00003500 * don't interfere with normal reception
3501 */
Andy Fleming7f7f5312005-11-11 12:38:59 -06003502static void gfar_clear_exact_match(struct net_device *dev)
3503{
3504 int idx;
Joe Perches6a3c910c2011-11-16 09:38:02 +00003505 static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
Andy Fleming7f7f5312005-11-11 12:38:59 -06003506
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00003507 for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
Joe Perchesb6bc7652010-12-21 02:16:08 -08003508 gfar_set_mac_for_addr(dev, idx, zero_arr);
Andy Fleming7f7f5312005-11-11 12:38:59 -06003509}
3510
Linus Torvalds1da177e2005-04-16 15:20:36 -07003511/* Set the appropriate hash bit for the given addr */
3512/* The algorithm works like so:
3513 * 1) Take the Destination Address (i.e. the multicast address), and
3514 * do a CRC on it (little endian), and reverse the bits of the
3515 * result.
3516 * 2) Use the 8 most significant bits as a hash into a 256-entry
3517 * table. The table is controlled through 8 32-bit registers:
3518 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
3519 * entry 255. This means that the 3 most significant bits of the
3520 * hash select which gaddr register to use, and the other 5 bits
3521 * select which bit (assuming an IBM numbering scheme, which
3522 * for PowerPC (tm) is usually the case) in that register holds
Jan Ceuleers0977f812012-06-05 03:42:12 +00003523 * the entry.
3524 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003525static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
3526{
3527 u32 tempval;
3528 struct gfar_private *priv = netdev_priv(dev);
Joe Perches6a3c910c2011-11-16 09:38:02 +00003529 u32 result = ether_crc(ETH_ALEN, addr);
Kumar Gala0bbaf062005-06-20 10:54:21 -05003530 int width = priv->hash_width;
3531 u8 whichbit = (result >> (32 - width)) & 0x1f;
3532 u8 whichreg = result >> (32 - width + 5);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003533 u32 value = (1 << (31 - whichbit));
3534
Kumar Gala0bbaf062005-06-20 10:54:21 -05003535 tempval = gfar_read(priv->hash_regs[whichreg]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003536 tempval |= value;
Kumar Gala0bbaf062005-06-20 10:54:21 -05003537 gfar_write(priv->hash_regs[whichreg], tempval);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003538}
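
/* A self-contained userspace sketch of the mapping above, assuming the
 * non-extended 256-entry table (hash_width == 8). ether_crc() is
 * re-implemented locally so the sketch stands alone; the kernel helper
 * computes the same big-endian (bit-reversed) CRC-32.
 */
#include <stdint.h>
#include <stdio.h>

#define CRC32_POLY_BE 0x04c11db7u

static uint32_t ether_crc_sketch(int len, const uint8_t *data)
{
	uint32_t crc = ~0u;

	while (len-- > 0) {
		uint8_t octet = *data++;
		int bit;

		for (bit = 0; bit < 8; bit++, octet >>= 1)
			crc = (crc << 1) ^
			      ((((crc >> 31) ^ octet) & 1) ? CRC32_POLY_BE : 0);
	}
	return crc;
}

int main(void)
{
	const uint8_t addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t result = ether_crc_sketch(6, addr);
	unsigned int width = 8;			/* assumed: non-extended hash */
	unsigned int whichbit = (result >> (32 - width)) & 0x1f;
	unsigned int whichreg = result >> (32 - width + 5);

	printf("set gaddr%u bit (1 << (31 - %u))\n", whichreg, whichbit);
	return 0;
}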
3539
Andy Fleming7f7f5312005-11-11 12:38:59 -06003540
3541/* There are multiple MAC Address register pairs on some controllers.
3542 * This function sets the num'th pair to the given address
3543 */
Joe Perchesb6bc7652010-12-21 02:16:08 -08003544static void gfar_set_mac_for_addr(struct net_device *dev, int num,
3545 const u8 *addr)
Andy Fleming7f7f5312005-11-11 12:38:59 -06003546{
3547 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00003548 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Andy Fleming7f7f5312005-11-11 12:38:59 -06003549 u32 tempval;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003550 u32 __iomem *macptr = &regs->macstnaddr1;
Andy Fleming7f7f5312005-11-11 12:38:59 -06003551
3552 macptr += num * 2;
3553
Claudiu Manoil83bfc3c2014-10-07 10:44:33 +03003554 /* For a station address of 0x12345678ABCD in transmission
3555 * order (BE), MACnADDR1 is set to 0xCDAB7856 and
3556 * MACnADDR2 is set to 0x34120000.
Jan Ceuleers0977f812012-06-05 03:42:12 +00003557 */
Claudiu Manoil83bfc3c2014-10-07 10:44:33 +03003558 tempval = (addr[5] << 24) | (addr[4] << 16) |
3559 (addr[3] << 8) | addr[2];
Andy Fleming7f7f5312005-11-11 12:38:59 -06003560
Claudiu Manoil83bfc3c2014-10-07 10:44:33 +03003561 gfar_write(macptr, tempval);
Andy Fleming7f7f5312005-11-11 12:38:59 -06003562
Claudiu Manoil83bfc3c2014-10-07 10:44:33 +03003563 tempval = (addr[1] << 24) | (addr[0] << 16);
Andy Fleming7f7f5312005-11-11 12:38:59 -06003564
3565 gfar_write(macptr+1, tempval);
3566}
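
/* A quick userspace check of the packing above, using the station
 * address from the comment (0x12345678ABCD in transmission order):
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t addr[6] = { 0x12, 0x34, 0x56, 0x78, 0xAB, 0xCD };
	uint32_t macnaddr1 = ((uint32_t)addr[5] << 24) | (addr[4] << 16) |
			     (addr[3] << 8) | addr[2];
	uint32_t macnaddr2 = ((uint32_t)addr[1] << 24) | (addr[0] << 16);

	printf("MACnADDR1 = 0x%08" PRIX32 "\n", macnaddr1);	/* 0xCDAB7856 */
	printf("MACnADDR2 = 0x%08" PRIX32 "\n", macnaddr2);	/* 0x34120000 */
	return 0;
}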
3567
Linus Torvalds1da177e2005-04-16 15:20:36 -07003568/* GFAR error interrupt handler */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003569static irqreturn_t gfar_error(int irq, void *grp_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003570{
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003571 struct gfar_priv_grp *gfargrp = grp_id;
3572 struct gfar __iomem *regs = gfargrp->regs;
3573 struct gfar_private *priv = gfargrp->priv;
3574 struct net_device *dev = priv->ndev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003575
3576 /* Save ievent for future reference */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003577 u32 events = gfar_read(&regs->ievent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003578
3579 /* Clear IEVENT */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003580 gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
Scott Woodd87eb122008-07-11 18:04:45 -05003581
3582 /* Magic Packet is not an error. */
Andy Flemingb31a1d82008-12-16 15:29:15 -08003583 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
Scott Woodd87eb122008-07-11 18:04:45 -05003584 (events & IEVENT_MAG))
3585 events &= ~IEVENT_MAG;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003586
3587 /* Log the raw event and current mask for debugging */
Kumar Gala0bbaf062005-06-20 10:54:21 -05003588 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00003589 netdev_dbg(dev,
3590 "error interrupt (ievent=0x%08x imask=0x%08x)\n",
Joe Perches59deab22011-06-14 08:57:47 +00003591 events, gfar_read(&regs->imask));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003592
3593 /* Update the error counters */
3594 if (events & IEVENT_TXE) {
Jeff Garzik09f75cd2007-10-03 17:41:50 -07003595 dev->stats.tx_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003596
3597 if (events & IEVENT_LC)
Jeff Garzik09f75cd2007-10-03 17:41:50 -07003598 dev->stats.tx_window_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003599 if (events & IEVENT_CRL)
Jeff Garzik09f75cd2007-10-03 17:41:50 -07003600 dev->stats.tx_aborted_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003601 if (events & IEVENT_XFUN) {
Joe Perches59deab22011-06-14 08:57:47 +00003602 netif_dbg(priv, tx_err, dev,
3603 "TX FIFO underrun, packet dropped\n");
Jeff Garzik09f75cd2007-10-03 17:41:50 -07003604 dev->stats.tx_dropped++;
Paul Gortmaker212079d2013-02-12 15:38:19 -05003605 atomic64_inc(&priv->extra_stats.tx_underrun);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003606
Claudiu Manoilbc602282015-05-06 18:07:29 +03003607 schedule_work(&priv->reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003608 }
Joe Perches59deab22011-06-14 08:57:47 +00003609 netif_dbg(priv, tx_err, dev, "Transmit Error\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003610 }
3611 if (events & IEVENT_BSY) {
Claudiu Manoil1de65a52015-10-23 11:42:00 +03003612 dev->stats.rx_over_errors++;
Paul Gortmaker212079d2013-02-12 15:38:19 -05003613 atomic64_inc(&priv->extra_stats.rx_bsy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003614
Joe Perches59deab22011-06-14 08:57:47 +00003615 netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
3616 gfar_read(&regs->rstat));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003617 }
3618 if (events & IEVENT_BABR) {
Jeff Garzik09f75cd2007-10-03 17:41:50 -07003619 dev->stats.rx_errors++;
Paul Gortmaker212079d2013-02-12 15:38:19 -05003620 atomic64_inc(&priv->extra_stats.rx_babr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003621
Joe Perches59deab22011-06-14 08:57:47 +00003622 netif_dbg(priv, rx_err, dev, "babbling RX error\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003623 }
3624 if (events & IEVENT_EBERR) {
Paul Gortmaker212079d2013-02-12 15:38:19 -05003625 atomic64_inc(&priv->extra_stats.eberr);
Joe Perches59deab22011-06-14 08:57:47 +00003626 netif_dbg(priv, rx_err, dev, "bus error\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003627 }
Joe Perches59deab22011-06-14 08:57:47 +00003628 if (events & IEVENT_RXC)
3629 netif_dbg(priv, rx_status, dev, "control frame\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003630
3631 if (events & IEVENT_BABT) {
Paul Gortmaker212079d2013-02-12 15:38:19 -05003632 atomic64_inc(&priv->extra_stats.tx_babt);
Joe Perches59deab22011-06-14 08:57:47 +00003633 netif_dbg(priv, tx_err, dev, "babbling TX error\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003634 }
3635 return IRQ_HANDLED;
3636}
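
/* The handler above follows a fixed pattern: read IEVENT, write the
 * error bits back to acknowledge them, then decode what remains. A
 * condensed, table-driven userspace sketch of the decode step (the
 * bit positions below are placeholders, not the real IEVENT_* values):
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define EV_TXE	(1u << 0)	/* placeholder: transmit error */
#define EV_BSY	(1u << 1)	/* placeholder: rx busy/overrun */
#define EV_BABR	(1u << 2)	/* placeholder: babbling rx */

static const struct {
	uint32_t mask;
	const char *msg;
} ev_table[] = {
	{ EV_TXE,  "transmit error" },
	{ EV_BSY,  "busy (rx overrun)" },
	{ EV_BABR, "babbling RX error" },
};

static void decode_events(uint32_t events)
{
	size_t i;

	for (i = 0; i < sizeof(ev_table) / sizeof(ev_table[0]); i++)
		if (events & ev_table[i].mask)
			printf("event: %s\n", ev_table[i].msg);
}

int main(void)
{
	/* as if read from (and acked back to) the IEVENT register */
	decode_events(EV_TXE | EV_BABR);
	return 0;
}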
3637
Claudiu Manoil6ce29b02014-04-30 14:27:21 +03003638static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
3639{
Philippe Reynes4c4a6b02016-05-16 01:30:08 +02003640 struct net_device *ndev = priv->ndev;
3641 struct phy_device *phydev = ndev->phydev;
Claudiu Manoil6ce29b02014-04-30 14:27:21 +03003642 u32 val = 0;
3643
3644 if (!phydev->duplex)
3645 return val;
3646
3647 if (!priv->pause_aneg_en) {
3648 if (priv->tx_pause_en)
3649 val |= MACCFG1_TX_FLOW;
3650 if (priv->rx_pause_en)
3651 val |= MACCFG1_RX_FLOW;
3652 } else {
3653 u16 lcl_adv, rmt_adv;
3654 u8 flowctrl;
3655 /* get link partner capabilities */
3656 rmt_adv = 0;
3657 if (phydev->pause)
3658 rmt_adv = LPA_PAUSE_CAP;
3659 if (phydev->asym_pause)
3660 rmt_adv |= LPA_PAUSE_ASYM;
3661
Andrew Lunn3c1bcc82018-11-10 23:43:33 +01003662 lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
Claudiu Manoil6ce29b02014-04-30 14:27:21 +03003663 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
3664 if (flowctrl & FLOW_CTRL_TX)
3665 val |= MACCFG1_TX_FLOW;
3666 if (flowctrl & FLOW_CTRL_RX)
3667 val |= MACCFG1_RX_FLOW;
3668 }
3669
3670 return val;
3671}
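
/* Self-contained sketch of the IEEE 802.3 pause resolution performed
 * by mii_resolve_flowctrl_fdx() above. The constants are local
 * stand-ins for the <linux/mii.h> advertisement and FLOW_CTRL_* bits.
 */
#include <stdint.h>
#include <stdio.h>

#define PAUSE_CAP	0x1	/* symmetric pause advertised */
#define PAUSE_ASYM	0x2	/* asymmetric pause advertised */
#define FC_TX		0x1
#define FC_RX		0x2

static uint8_t resolve_fdx(uint16_t lcl_adv, uint16_t rmt_adv)
{
	if (lcl_adv & rmt_adv & PAUSE_CAP)
		return FC_TX | FC_RX;
	if (lcl_adv & rmt_adv & PAUSE_ASYM) {
		if (lcl_adv & PAUSE_CAP)
			return FC_RX;
		if (rmt_adv & PAUSE_CAP)
			return FC_TX;
	}
	return 0;
}

int main(void)
{
	/* We advertise both pause bits; the partner advertises asym
	 * only. The partner may pause us but will not honor pause
	 * frames, so only RX flow control is enabled locally. */
	uint8_t fc = resolve_fdx(PAUSE_CAP | PAUSE_ASYM, PAUSE_ASYM);

	printf("tx_flow=%d rx_flow=%d\n", !!(fc & FC_TX), !!(fc & FC_RX));
	return 0;
}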
3672
3673static noinline void gfar_update_link_state(struct gfar_private *priv)
3674{
3675 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Philippe Reynes4c4a6b02016-05-16 01:30:08 +02003676 struct net_device *ndev = priv->ndev;
3677 struct phy_device *phydev = ndev->phydev;
Matei Pavaluca45b679c92014-10-27 10:42:44 +02003678 struct gfar_priv_rx_q *rx_queue = NULL;
3679 int i;
Claudiu Manoil6ce29b02014-04-30 14:27:21 +03003680
3681 if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
3682 return;
3683
3684 if (phydev->link) {
3685 u32 tempval1 = gfar_read(&regs->maccfg1);
3686 u32 tempval = gfar_read(&regs->maccfg2);
3687 u32 ecntrl = gfar_read(&regs->ecntrl);
Claudiu Manoil5d621672017-09-04 10:45:28 +03003688 u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);
Claudiu Manoil6ce29b02014-04-30 14:27:21 +03003689
3690 if (phydev->duplex != priv->oldduplex) {
3691 if (!(phydev->duplex))
3692 tempval &= ~(MACCFG2_FULL_DUPLEX);
3693 else
3694 tempval |= MACCFG2_FULL_DUPLEX;
3695
3696 priv->oldduplex = phydev->duplex;
3697 }
3698
3699 if (phydev->speed != priv->oldspeed) {
3700 switch (phydev->speed) {
3701 case 1000:
3702 tempval =
3703 ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
3704
3705 ecntrl &= ~(ECNTRL_R100);
3706 break;
3707 case 100:
3708 case 10:
3709 tempval =
3710 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
3711
3712 /* In reduced pin modes (RMII/RGMII),
3713 * R100 distinguishes 10 from 100 Mbps
3714 */
3715 if (phydev->speed == SPEED_100)
3716 ecntrl |= ECNTRL_R100;
3717 else
3718 ecntrl &= ~(ECNTRL_R100);
3719 break;
3720 default:
3721 netif_warn(priv, link, priv->ndev,
3722 "Ack! Speed (%d) is not 10/100/1000!\n",
3723 phydev->speed);
3724 break;
3725 }
3726
3727 priv->oldspeed = phydev->speed;
3728 }
3729
3730 tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
3731 tempval1 |= gfar_get_flowctrl_cfg(priv);
3732
Matei Pavaluca45b679c92014-10-27 10:42:44 +02003733 /* Turn last free buffer recording on */
3734 if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
3735 for (i = 0; i < priv->num_rx_queues; i++) {
Scott Woodb4b67f22015-07-29 16:13:06 +03003736 u32 bdp_dma;
3737
Matei Pavaluca45b679c92014-10-27 10:42:44 +02003738 rx_queue = priv->rx_queue[i];
Scott Woodb4b67f22015-07-29 16:13:06 +03003739 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
3740 gfar_write(rx_queue->rfbptr, bdp_dma);
Matei Pavaluca45b679c92014-10-27 10:42:44 +02003741 }
3742
3743 priv->tx_actual_en = 1;
3744 }
3745
3746 if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
3747 priv->tx_actual_en = 0;
3748
Claudiu Manoil6ce29b02014-04-30 14:27:21 +03003749 gfar_write(&regs->maccfg1, tempval1);
3750 gfar_write(&regs->maccfg2, tempval);
3751 gfar_write(&regs->ecntrl, ecntrl);
3752
3753 if (!priv->oldlink)
3754 priv->oldlink = 1;
3755
3756 } else if (priv->oldlink) {
3757 priv->oldlink = 0;
3758 priv->oldspeed = 0;
3759 priv->oldduplex = -1;
3760 }
3761
3762 if (netif_msg_link(priv))
3763 phy_print_status(phydev);
3764}
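
/* Condensed view of the speed handling above: a userspace sketch of
 * the read-modify-write sequence on a link-speed change (mask values
 * are placeholders, not the real MACCFG2/ECNTRL definitions):
 */
#include <stdint.h>
#include <stdio.h>

#define IF_MODE_MASK	0x3	/* placeholder for MACCFG2_IF */
#define IF_MODE_MII	0x1
#define IF_MODE_GMII	0x2
#define R100_BIT	0x4	/* placeholder for ECNTRL_R100 */

static void apply_speed(uint32_t *maccfg2, uint32_t *ecntrl, int speed)
{
	switch (speed) {
	case 1000:			/* GMII; R100 irrelevant */
		*maccfg2 = (*maccfg2 & ~IF_MODE_MASK) | IF_MODE_GMII;
		*ecntrl &= ~R100_BIT;
		break;
	case 100:
	case 10:			/* MII; R100 picks 100 vs 10 */
		*maccfg2 = (*maccfg2 & ~IF_MODE_MASK) | IF_MODE_MII;
		if (speed == 100)
			*ecntrl |= R100_BIT;
		else
			*ecntrl &= ~R100_BIT;
		break;
	}
}

int main(void)
{
	uint32_t maccfg2 = 0, ecntrl = 0;

	apply_speed(&maccfg2, &ecntrl, 100);
	printf("maccfg2=0x%x ecntrl=0x%x\n", maccfg2, ecntrl);
	return 0;
}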
3765
Fabian Frederick94e5a2a2015-03-17 19:37:34 +01003766static const struct of_device_id gfar_match[] =
Andy Flemingb31a1d82008-12-16 15:29:15 -08003767{
3768 {
3769 .type = "network",
3770 .compatible = "gianfar",
3771 },
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00003772 {
3773 .compatible = "fsl,etsec2",
3774 },
Andy Flemingb31a1d82008-12-16 15:29:15 -08003775 {},
3776};
Anton Vorontsove72701a2009-10-14 14:54:52 -07003777MODULE_DEVICE_TABLE(of, gfar_match);
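
/* A hypothetical device-tree node that the first entry above would
 * match (property values are illustrative, not from a real board):
 *
 *	ethernet@24000 {
 *		device_type = "network";
 *		compatible = "gianfar";
 *		reg = <0x24000 0x1000>;
 *		local-mac-address = [ 00 00 00 00 00 00 ];
 *		interrupts = <29 2 30 2 34 2>;
 *		phy-handle = <&phy0>;
 *	};
 */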
Andy Flemingb31a1d82008-12-16 15:29:15 -08003778
Linus Torvalds1da177e2005-04-16 15:20:36 -07003779/* Structure for a device driver */
Grant Likely74888762011-02-22 21:05:51 -07003780static struct platform_driver gfar_driver = {
Grant Likely40182942010-04-13 16:13:02 -07003781 .driver = {
3782 .name = "fsl-gianfar",
Grant Likely40182942010-04-13 16:13:02 -07003783 .pm = GFAR_PM_OPS,
3784 .of_match_table = gfar_match,
3785 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07003786 .probe = gfar_probe,
3787 .remove = gfar_remove,
3788};
3789
Axel Lindb62f682011-11-27 16:44:17 +00003790module_platform_driver(gfar_driver);
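
/* For reference, module_platform_driver() expands to roughly the
 * init/exit boilerplate it replaces (sketch of the generated code,
 * guarded out since the macro above already emits it):
 */
#if 0
static int __init gfar_driver_init(void)
{
	return platform_driver_register(&gfar_driver);
}
module_init(gfar_driver_init);

static void __exit gfar_driver_exit(void)
{
	platform_driver_unregister(&gfar_driver);
}
module_exit(gfar_driver_exit);
#endif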