/* drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */

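/* Illustrative sketch of the Rx ring walk described above -- not
 * driver code; field and flag names follow struct rxbd8 in gianfar.h,
 * and first_unprocessed_bd is a hypothetical starting point. The
 * consumer stops at the first descriptor still owned by hardware
 * (RXBD_EMPTY) and wraps back to the ring base on RXBD_WRAP:
 *
 *	struct rxbd8 *bdp = first_unprocessed_bd;
 *
 *	while (!(be16_to_cpu(bdp->status) & RXBD_EMPTY)) {
 *		(pass the skb up, attach a fresh buffer, re-arm the BD)
 *		if (be16_to_cpu(bdp->status) & RXBD_WRAP)
 *			bdp = rx_queue->rx_bd_base;
 *		else
 *			bdp++;
 *	}
 */
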
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DEBUG

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#ifdef CONFIG_PPC
#include <asm/reg.h>
#include <asm/mpc85xx.h>
#endif
#include <asm/irq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>

#include "gianfar.h"

#define TX_TIMEOUT	(5*HZ)

const char gfar_driver_version[] = "2.0";

static int gfar_enet_open(struct net_device *dev);
static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
				int alloc_cnt);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static noinline void gfar_update_link_state(struct gfar_private *priv);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll_rx(struct napi_struct *napi, int budget);
static int gfar_poll_tx(struct napi_struct *napi, int budget);
static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb);
static void gfar_halt_nodisable(struct gfar_private *priv);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

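/* Initialize a single Rx buffer descriptor: point it at the given DMA
 * buffer, mark it empty and interrupt-enabled, and set the wrap flag on
 * the last descriptor of the ring. The barrier ensures the buffer
 * pointer is visible to hardware before the descriptor is handed over.
 */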
static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = cpu_to_be32(buf);

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	gfar_wmb();

	bdp->lstatus = cpu_to_be32(lstatus);
}

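/* Bring all Tx and Rx rings back to their initial state: clear the Tx
 * descriptors (setting TXBD_WRAP on the last one) and refill the Rx
 * rings with fresh buffers, recording each ring's free-buffer pointer
 * register.
 */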
static void gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	u32 __iomem *rfbptr;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
					    TXBD_WRAP);
	}

	rfbptr = &regs->rfbptr0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];

		rx_queue->next_to_clean = 0;
		rx_queue->next_to_use = 0;
		rx_queue->next_to_alloc = 0;

		/* make sure next_to_clean != next_to_use after this
		 * by leaving at least 1 unused descriptor
		 */
		gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));

		rx_queue->rfbptr = rfbptr;
		rfbptr += 2;
	}
}

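/* Allocate one coherent DMA region for all Tx and Rx descriptor rings,
 * carve it up among the queues, and allocate the per-ring skb/buffer
 * bookkeeping arrays before initializing the descriptors themselves.
 */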
static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = priv->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
				   (priv->total_tx_ring_size *
				    sizeof(struct txbd8)) +
				   (priv->total_rx_ring_size *
				    sizeof(struct rxbd8)),
				   &addr, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->ndev = ndev;
		rx_queue->dev = dev;
		addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff =
			kmalloc_array(tx_queue->tx_ring_size,
				      sizeof(*tx_queue->tx_skbuff),
				      GFP_KERNEL);
		if (!tx_queue->tx_skbuff)
			goto cleanup;

		for (j = 0; j < tx_queue->tx_ring_size; j++)
			tx_queue->tx_skbuff[j] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
					    sizeof(*rx_queue->rx_buff),
					    GFP_KERNEL);
		if (!rx_queue->rx_buff)
			goto cleanup;
	}

	gfar_init_bds(ndev);

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}

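/* Program the DMA base address of every descriptor ring into the
 * controller's TBASEn/RBASEn registers (consecutive ring registers sit
 * two u32s apart, hence the pointer increment by 2).
 */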
static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}

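/* Program each Rx queue's RQPRMn register with its ring size and the
 * default free-buffer threshold used for Rx flow control.
 */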
static void gfar_init_rqprm(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->rqprm0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
			   (DEFAULT_RX_LFC_THR << FBTHR_SHIFT));
		baddr++;
	}
}

static void gfar_rx_offload_en(struct gfar_private *priv)
{
	/* set this when rx hw offload (TOE) functions are being used */
	priv->uses_rxfcb = 0;

	if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
		priv->uses_rxfcb = 1;

	if (priv->hwts_rx_en || priv->rx_filer_enable)
		priv->uses_rxfcb = 1;
}

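/* Build up RCTRL from the current settings (filer, promiscuous mode,
 * checksum offload, extended hash, padding, timestamping, VLAN
 * extraction, flow control) and program it into hardware.
 */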
static void gfar_mac_rx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
		/* Program the RIR0 reg with the required distribution */
		if (priv->poll_mode == GFAR_SQ_POLLING)
			gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
		else /* GFAR_MQ_POLLING */
			gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
	}

	/* Restore PROMISC mode */
	if (priv->ndev->flags & IFF_PROMISC)
		rctrl |= RCTRL_PROM;

	if (priv->ndev->features & NETIF_F_RXCSUM)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash)
		rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en)
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;

	/* Clear the LFC bit */
	gfar_write(&regs->rctrl, rctrl);
	/* Init flow control threshold values */
	gfar_init_rqprm(priv);
	gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL);
	rctrl |= RCTRL_LFC;

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);
}

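/* Build up TCTRL (Tx checksumming, priority vs. weighted round-robin
 * scheduling, VLAN insertion) and program it into hardware.
 */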
static void gfar_mac_tx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tctrl = 0;

	if (priv->ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	if (priv->prio_sched_en)
		tctrl |= TCTRL_TXSCHED_PRIO;
	else {
		tctrl |= TCTRL_TXSCHED_WRRS;
		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
	}

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
		tctrl |= TCTRL_VLINS;

	gfar_write(&regs->tctrl, tctrl);
}

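/* Write the per-queue interrupt coalescing settings (frame-count and
 * timer thresholds) into the TXICn/RXICn registers; in single-group
 * mode only the single TXIC/RXIC register pair exists.
 */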
static void gfar_configure_coalescing(struct gfar_private *priv,
				      unsigned long tx_mask, unsigned long rx_mask)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;

	if (priv->mode == MQ_MG_MODE) {
		int i = 0;

		baddr = &regs->txic0;
		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->tx_queue[i]->txcoalescing))
				gfar_write(baddr + i, priv->tx_queue[i]->txic);
		}

		baddr = &regs->rxic0;
		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->rx_queue[i]->rxcoalescing))
				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
		}
	} else {
		/* Backward compatible case -- even if we enable
		 * multiple queues, there's only single reg to program
		 */
		gfar_write(&regs->txic, 0);
		if (likely(priv->tx_queue[0]->txcoalescing))
			gfar_write(&regs->txic, priv->tx_queue[0]->txic);

		gfar_write(&regs->rxic, 0);
		if (unlikely(priv->rx_queue[0]->rxcoalescing))
			gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
	}
}

void gfar_configure_coalescing_all(struct gfar_private *priv)
{
	gfar_configure_coalescing(priv, 0xFF, 0xFF);
}

static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes   += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes   = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
		tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}

	dev->stats.tx_bytes = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}

static int gfar_set_mac_addr(struct net_device *dev, void *p)
{
	eth_mac_addr(dev, p);

	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}

static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_features = gfar_set_features,
	.ndo_set_rx_mode = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_get_stats = gfar_get_stats,
	.ndo_set_mac_address = gfar_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};

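/* Ack any pending events and mask all interrupt sources, per group */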
static void gfar_ints_disable(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;

		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}
}

static void gfar_ints_enable(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;

		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask, IMASK_DEFAULT);
	}
}

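/* Allocate and initialize the per-queue Tx/Rx control structures */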
static int gfar_alloc_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
					    GFP_KERNEL);
		if (!priv->tx_queue[i])
			return -ENOMEM;

		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = priv->ndev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}
	return 0;
}

static int gfar_alloc_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
					    GFP_KERNEL);
		if (!priv->rx_queue[i])
			return -ENOMEM;

		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->ndev = priv->ndev;
	}
	return 0;
}

static void gfar_free_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void gfar_free_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void free_gfar_dev(struct gfar_private *priv)
{
	int i, j;

	for (i = 0; i < priv->num_grps; i++)
		for (j = 0; j < GFAR_NUM_IRQS; j++) {
			kfree(priv->gfargrp[i].irqinfo[j]);
			priv->gfargrp[i].irqinfo[j] = NULL;
		}

	free_netdev(priv->ndev);
}

static void disable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_disable(&priv->gfargrp[i].napi_rx);
		napi_disable(&priv->gfargrp[i].napi_tx);
	}
}

static void enable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_enable(&priv->gfargrp[i].napi_rx);
		napi_enable(&priv->gfargrp[i].napi_tx);
	}
}

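/* Map one interrupt group's register block and IRQs, and assign Rx/Tx
 * queues to the group according to the (DT-provided or default) bit maps.
 */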
static int gfar_parse_group(struct device_node *np,
			    struct gfar_private *priv, const char *model)
{
	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
	int i;

	for (i = 0; i < GFAR_NUM_IRQS; i++) {
		grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
					  GFP_KERNEL);
		if (!grp->irqinfo[i])
			return -ENOMEM;
	}

	grp->regs = of_iomap(np, 0);
	if (!grp->regs)
		return -ENOMEM;

	gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
		if (!gfar_irq(grp, TX)->irq ||
		    !gfar_irq(grp, RX)->irq ||
		    !gfar_irq(grp, ER)->irq)
			return -EINVAL;
	}

	grp->priv = priv;
	spin_lock_init(&grp->grplock);
	if (priv->mode == MQ_MG_MODE) {
		u32 rxq_mask, txq_mask;
		int ret;

		grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
		grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);

		ret = of_property_read_u32(np, "fsl,rx-bit-map", &rxq_mask);
		if (!ret) {
			grp->rx_bit_map = rxq_mask ?
				rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
		}

		ret = of_property_read_u32(np, "fsl,tx-bit-map", &txq_mask);
		if (!ret) {
			grp->tx_bit_map = txq_mask ?
				txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
		}

		if (priv->poll_mode == GFAR_SQ_POLLING) {
			/* One Q per interrupt group: Q0 to G0, Q1 to G1 */
			grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
			grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
		}
	} else {
		grp->rx_bit_map = 0xFF;
		grp->tx_bit_map = 0xFF;
	}

	/* bit_map's MSB is q0 (from q0 to q7) but, for_each_set_bit parses
	 * right to left, so we need to revert the 8 bits to get the q index
	 */
	grp->rx_bit_map = bitrev8(grp->rx_bit_map);
	grp->tx_bit_map = bitrev8(grp->tx_bit_map);

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups
	 */
	for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
		if (!grp->rx_queue)
			grp->rx_queue = priv->rx_queue[i];
		grp->num_rx_queues++;
		grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
		priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		priv->rx_queue[i]->grp = grp;
	}

	for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
		if (!grp->tx_queue)
			grp->tx_queue = priv->tx_queue[i];
		grp->num_tx_queues++;
		grp->tstat |= (TSTAT_CLEAR_THALT >> i);
		priv->tqueue |= (TQUEUE_EN0 >> i);
		priv->tx_queue[i]->grp = grp;
	}

	priv->num_grps++;

	return 0;
}

static int gfar_of_group_count(struct device_node *np)
{
	struct device_node *child;
	int num = 0;

	for_each_available_child_of_node(np, child)
		if (!of_node_cmp(child->name, "queue-group"))
			num++;

	return num;
}

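/* Read the device tree node: determine queue counts and polling mode,
 * allocate the net_device, parse the interrupt groups, and set up
 * stashing, the MAC address, capability flags and PHY/TBI handles.
 */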
static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	u32 stash_len = 0;
	u32 stash_idx = 0;
	unsigned int num_tx_qs, num_rx_qs;
	unsigned short mode, poll_mode;

	if (!np)
		return -ENODEV;

	if (of_device_is_compatible(np, "fsl,etsec2")) {
		mode = MQ_MG_MODE;
		poll_mode = GFAR_SQ_POLLING;
	} else {
		mode = SQ_SG_MODE;
		poll_mode = GFAR_SQ_POLLING;
	}

	if (mode == SQ_SG_MODE) {
		num_tx_qs = 1;
		num_rx_qs = 1;
	} else { /* MQ_MG_MODE */
		/* get the actual number of supported groups */
		unsigned int num_grps = gfar_of_group_count(np);

		if (num_grps == 0 || num_grps > MAXGROUPS) {
			dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
				num_grps);
			pr_err("Cannot do alloc_etherdev, aborting\n");
			return -EINVAL;
		}

		if (poll_mode == GFAR_SQ_POLLING) {
			num_tx_qs = num_grps; /* one txq per int group */
			num_rx_qs = num_grps; /* one rxq per int group */
		} else { /* GFAR_MQ_POLLING */
			u32 tx_queues, rx_queues;
			int ret;

			/* parse the num of HW tx and rx queues */
			ret = of_property_read_u32(np, "fsl,num_tx_queues",
						   &tx_queues);
			num_tx_qs = ret ? 1 : tx_queues;

			ret = of_property_read_u32(np, "fsl,num_rx_queues",
						   &rx_queues);
			num_rx_qs = ret ? 1 : rx_queues;
		}
	}

	if (num_tx_qs > MAX_TX_QS) {
		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
		       num_tx_qs, MAX_TX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	if (num_rx_qs > MAX_RX_QS) {
		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
		       num_rx_qs, MAX_RX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->ndev = dev;

	priv->mode = mode;
	priv->poll_mode = poll_mode;

	priv->num_tx_queues = num_tx_qs;
	netif_set_real_num_rx_queues(dev, num_rx_qs);
	priv->num_rx_queues = num_rx_qs;

	err = gfar_alloc_tx_queues(priv);
	if (err)
		goto tx_alloc_failed;

	err = gfar_alloc_rx_queues(priv);
	if (err)
		goto rx_alloc_failed;

	err = of_property_read_string(np, "model", &model);
	if (err) {
		pr_err("Device model property missing, aborting\n");
		goto rx_alloc_failed;
	}

	/* Init Rx queue filer rule set linked list */
	INIT_LIST_HEAD(&priv->rx_list.list);
	priv->rx_list.count = 0;
	mutex_init(&priv->rx_queue_access);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (priv->mode == MQ_MG_MODE) {
		for_each_available_child_of_node(np, child) {
			if (of_node_cmp(child->name, "queue-group"))
				continue;

			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else { /* SQ_SG_MODE */
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	if (of_property_read_bool(np, "bd-stash")) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	err = of_property_read_u32(np, "rx-stash-len", &stash_len);

	if (err == 0)
		priv->rx_stash_size = stash_len;

	err = of_property_read_u32(np, "rx-stash-idx", &stash_idx);

	if (err == 0)
		priv->rx_stash_index = stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);

	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				      FSL_GIANFAR_DEV_HAS_COALESCE |
				      FSL_GIANFAR_DEV_HAS_RMON |
				      FSL_GIANFAR_DEV_HAS_MULTI_INTR;

	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				      FSL_GIANFAR_DEV_HAS_COALESCE |
				      FSL_GIANFAR_DEV_HAS_RMON |
				      FSL_GIANFAR_DEV_HAS_MULTI_INTR |
				      FSL_GIANFAR_DEV_HAS_CSUM |
				      FSL_GIANFAR_DEV_HAS_VLAN |
				      FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
				      FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
				      FSL_GIANFAR_DEV_HAS_TIMER |
				      FSL_GIANFAR_DEV_HAS_RX_FILER;

	err = of_property_read_string(np, "phy-connection-type", &ctype);

	/* We only care about rgmii-id. The rest are autodetected */
	if (err == 0 && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_find_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	if (of_get_property(np, "fsl,wake-on-filer", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* In the case of a fixed PHY, the DT node associated
	 * to the PHY is the Ethernet MAC DT node.
	 */
	if (!priv->phy_node && of_phy_is_fixed_link(np)) {
		err = of_phy_register_fixed_link(np);
		if (err)
			goto err_grp_init;

		priv->phy_node = of_node_get(np);
	}

	/* Find the TBI PHY. If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

err_grp_init:
	unmap_group_regs(priv);
rx_alloc_failed:
	gfar_free_rx_queues(priv);
tx_alloc_failed:
	gfar_free_tx_queues(priv);
	free_gfar_dev(priv);
	return err;
}

static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		priv->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		if (priv->hwts_rx_en) {
			priv->hwts_rx_en = 0;
			reset_gfar(netdev);
		}
		break;
	default:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		if (!priv->hwts_rx_en) {
			priv->hwts_rx_en = 1;
			reset_gfar(netdev);
		}
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	config.flags = 0;
	config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	config.rx_filter = (priv->hwts_rx_en ?
			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct phy_device *phydev = dev->phydev;

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCSHWTSTAMP)
		return gfar_hwtstamp_set(dev, rq);
	if (cmd == SIOCGHWTSTAMP)
		return gfar_hwtstamp_get(dev, rq);

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, rq, cmd);
}

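/* Install the four filer rules that make up one traffic-class cluster,
 * working downward from rqfar; returns the next free filer index.
 */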
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
				   u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}

static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		priv->ftp_rqfcr[i] = rqfcr;
		priv->ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}

#ifdef CONFIG_PPC
static void __gfar_detect_errata_83xx(struct gfar_private *priv)
{
	unsigned int pvr = mfspr(SPRN_PVR);
	unsigned int svr = mfspr(SPRN_SVR);
	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
	unsigned int rev = svr & 0xffff;

	/* MPC8313 Rev 2.0 and higher; All MPC837x */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_74;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_76;

	/* MPC8313 Rev < 2.0 */
	if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
		priv->errata |= GFAR_ERRATA_12;
}

static void __gfar_detect_errata_85xx(struct gfar_private *priv)
{
	unsigned int svr = mfspr(SPRN_SVR);

	if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
		priv->errata |= GFAR_ERRATA_12;
	/* P2020/P1010 Rev 1; MPC8548 Rev 2 */
	if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
	    ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) ||
	    ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31)))
		priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
}
#endif

static void gfar_detect_errata(struct gfar_private *priv)
{
	struct device *dev = &priv->ofdev->dev;

	/* no plans to fix */
	priv->errata |= GFAR_ERRATA_A002;

#ifdef CONFIG_PPC
	if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
		__gfar_detect_errata_85xx(priv);
	else /* non-mpc85xx parts, i.e. e300 core based */
		__gfar_detect_errata_83xx(priv);
#endif

	if (priv->errata)
		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
			 priv->errata);
}

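/* Soft-reset the MAC and reprogram it from scratch: frame length
 * limits, MACCFG2 (with the eTSEC74 workaround), hash filters, Rx/Tx
 * configuration and interrupt coalescing.
 */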
Claudiu Manoil08511332014-02-24 12:13:45 +02001139void gfar_mac_reset(struct gfar_private *priv)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001140{
Claudiu Manoil20862782014-02-17 12:53:14 +02001141 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Claudiu Manoila328ac92014-02-24 12:13:42 +02001142 u32 tempval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001143
1144 /* Reset MAC layer */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001145 gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001146
Andy Flemingb98ac702009-02-04 16:38:05 -08001147 /* We need to delay at least 3 TX clocks */
Claudiu Manoila328ac92014-02-24 12:13:42 +02001148 udelay(3);
Andy Flemingb98ac702009-02-04 16:38:05 -08001149
Claudiu Manoil23402bd2013-08-12 13:53:26 +03001150 /* the soft reset bit is not self-resetting, so we need to
1151 * clear it before resuming normal operation
1152 */
Claudiu Manoil20862782014-02-17 12:53:14 +02001153 gfar_write(&regs->maccfg1, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001154
Claudiu Manoila328ac92014-02-24 12:13:42 +02001155 udelay(3);
1156
Claudiu Manoil75354142015-07-13 16:22:06 +03001157 gfar_rx_offload_en(priv);
Claudiu Manoil88302642014-02-24 12:13:43 +02001158
1159 /* Initialize the max receive frame/buffer lengths */
Claudiu Manoil75354142015-07-13 16:22:06 +03001160 gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE);
1161 gfar_write(&regs->mrblr, GFAR_RXB_SIZE);
Claudiu Manoila328ac92014-02-24 12:13:42 +02001162
1163 /* Initialize the Minimum Frame Length Register */
1164 gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
1165
Linus Torvalds1da177e2005-04-16 15:20:36 -07001166 /* Initialize MACCFG2. */
Anton Vorontsov7d350972010-06-30 06:39:12 +00001167 tempval = MACCFG2_INIT_SETTINGS;
Claudiu Manoil88302642014-02-24 12:13:43 +02001168
Claudiu Manoil75354142015-07-13 16:22:06 +03001169 /* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1
 1170 * are marked as truncated. Avoid this by setting MACCFG2[Huge Frame]=1
 1171 * and by checking RxBD[LG], discarding frames larger than MAXFRM.
Claudiu Manoil88302642014-02-24 12:13:43 +02001172 */
Claudiu Manoil75354142015-07-13 16:22:06 +03001173 if (gfar_has_errata(priv, GFAR_ERRATA_74))
Anton Vorontsov7d350972010-06-30 06:39:12 +00001174 tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
Claudiu Manoil88302642014-02-24 12:13:43 +02001175
Anton Vorontsov7d350972010-06-30 06:39:12 +00001176 gfar_write(&regs->maccfg2, tempval);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001177
Claudiu Manoila328ac92014-02-24 12:13:42 +02001178 /* Clear mac addr hash registers */
1179 gfar_write(&regs->igaddr0, 0);
1180 gfar_write(&regs->igaddr1, 0);
1181 gfar_write(&regs->igaddr2, 0);
1182 gfar_write(&regs->igaddr3, 0);
1183 gfar_write(&regs->igaddr4, 0);
1184 gfar_write(&regs->igaddr5, 0);
1185 gfar_write(&regs->igaddr6, 0);
1186 gfar_write(&regs->igaddr7, 0);
1187
1188 gfar_write(&regs->gaddr0, 0);
1189 gfar_write(&regs->gaddr1, 0);
1190 gfar_write(&regs->gaddr2, 0);
1191 gfar_write(&regs->gaddr3, 0);
1192 gfar_write(&regs->gaddr4, 0);
1193 gfar_write(&regs->gaddr5, 0);
1194 gfar_write(&regs->gaddr6, 0);
1195 gfar_write(&regs->gaddr7, 0);
1196
1197 if (priv->extended_hash)
1198 gfar_clear_exact_match(priv->ndev);
1199
1200 gfar_mac_rx_config(priv);
1201
1202 gfar_mac_tx_config(priv);
1203
1204 gfar_set_mac_address(priv->ndev);
1205
1206 gfar_set_multi(priv->ndev);
1207
1208 /* clear ievent and imask before configuring coalescing */
1209 gfar_ints_disable(priv);
1210
1211 /* Configure the coalescing support */
1212 gfar_configure_coalescing_all(priv);
1213}
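/* Note that gfar_mac_reset() is shared by gfar_hw_init() at probe time,
 * startup_gfar() and the PM restore path (gfar_restore() below), so it
 * must leave the MAC fully reprogrammed rather than merely reset.
 */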
1214
1215static void gfar_hw_init(struct gfar_private *priv)
1216{
1217 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1218 u32 attrs;
1219
1220 /* Stop the DMA engine now, in case it was running before
 1221 * (the firmware could have used it and left it running).
1222 */
1223 gfar_halt(priv);
1224
1225 gfar_mac_reset(priv);
1226
 1227 /* Zero out the RMON MIB registers if the device has them */
1228 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
1229 memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
1230
1231 /* Mask off the CAM interrupts */
1232 gfar_write(&regs->rmon.cam1, 0xffffffff);
1233 gfar_write(&regs->rmon.cam2, 0xffffffff);
1234 }
1235
Linus Torvalds1da177e2005-04-16 15:20:36 -07001236 /* Initialize ECNTRL */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001237 gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001238
Claudiu Manoil34018fd2014-02-17 12:53:15 +02001239 /* Set the extraction length and index */
1240 attrs = ATTRELI_EL(priv->rx_stash_size) |
1241 ATTRELI_EI(priv->rx_stash_index);
1242
1243 gfar_write(&regs->attreli, attrs);
1244
1245 /* Start with defaults, and add stashing
1246 * depending on driver parameters
1247 */
1248 attrs = ATTR_INIT_SETTINGS;
1249
1250 if (priv->bd_stash_en)
1251 attrs |= ATTR_BDSTASH;
1252
1253 if (priv->rx_stash_size != 0)
1254 attrs |= ATTR_BUFSTASH;
1255
1256 gfar_write(&regs->attr, attrs);
1257
1258 /* FIFO configs */
1259 gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
1260 gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
1261 gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
1262
Claudiu Manoil20862782014-02-17 12:53:14 +02001263 /* Program the interrupt steering regs, only for MG devices */
1264 if (priv->num_grps > 1)
1265 gfar_write_isrg(priv);
Claudiu Manoil20862782014-02-17 12:53:14 +02001266}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001267
Xiubo Li898157e2014-06-04 16:49:16 +08001268static void gfar_init_addr_hash_table(struct gfar_private *priv)
Claudiu Manoil20862782014-02-17 12:53:14 +02001269{
1270 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001271
Andy Flemingb31a1d82008-12-16 15:29:15 -08001272 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05001273 priv->extended_hash = 1;
1274 priv->hash_width = 9;
1275
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001276 priv->hash_regs[0] = &regs->igaddr0;
1277 priv->hash_regs[1] = &regs->igaddr1;
1278 priv->hash_regs[2] = &regs->igaddr2;
1279 priv->hash_regs[3] = &regs->igaddr3;
1280 priv->hash_regs[4] = &regs->igaddr4;
1281 priv->hash_regs[5] = &regs->igaddr5;
1282 priv->hash_regs[6] = &regs->igaddr6;
1283 priv->hash_regs[7] = &regs->igaddr7;
1284 priv->hash_regs[8] = &regs->gaddr0;
1285 priv->hash_regs[9] = &regs->gaddr1;
1286 priv->hash_regs[10] = &regs->gaddr2;
1287 priv->hash_regs[11] = &regs->gaddr3;
1288 priv->hash_regs[12] = &regs->gaddr4;
1289 priv->hash_regs[13] = &regs->gaddr5;
1290 priv->hash_regs[14] = &regs->gaddr6;
1291 priv->hash_regs[15] = &regs->gaddr7;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001292
1293 } else {
1294 priv->extended_hash = 0;
1295 priv->hash_width = 8;
1296
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001297 priv->hash_regs[0] = &regs->gaddr0;
1298 priv->hash_regs[1] = &regs->gaddr1;
1299 priv->hash_regs[2] = &regs->gaddr2;
1300 priv->hash_regs[3] = &regs->gaddr3;
1301 priv->hash_regs[4] = &regs->gaddr4;
1302 priv->hash_regs[5] = &regs->gaddr5;
1303 priv->hash_regs[6] = &regs->gaddr6;
1304 priv->hash_regs[7] = &regs->gaddr7;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001305 }
Claudiu Manoil20862782014-02-17 12:53:14 +02001306}
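/* With the extended hash, all 16 igaddr/gaddr registers form one filter
 * of 16 x 32 = 512 bins, hence the 9-bit hash width; otherwise only the
 * eight gaddr registers are used: 8 x 32 = 256 bins, 8 hash bits.
 */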
1307
1308/* Set up the ethernet device structure, private data,
1309 * and anything else we need before we start
1310 */
1311static int gfar_probe(struct platform_device *ofdev)
1312{
Johan Hovold42c70042016-11-28 19:25:02 +01001313 struct device_node *np = ofdev->dev.of_node;
Claudiu Manoil20862782014-02-17 12:53:14 +02001314 struct net_device *dev = NULL;
1315 struct gfar_private *priv = NULL;
1316 int err = 0, i;
1317
1318 err = gfar_of_init(ofdev, &dev);
1319
1320 if (err)
1321 return err;
1322
1323 priv = netdev_priv(dev);
1324 priv->ndev = dev;
1325 priv->ofdev = ofdev;
1326 priv->dev = &ofdev->dev;
1327 SET_NETDEV_DEV(dev, &ofdev->dev);
1328
Claudiu Manoil20862782014-02-17 12:53:14 +02001329 INIT_WORK(&priv->reset_task, gfar_reset_task);
1330
1331 platform_set_drvdata(ofdev, priv);
1332
1333 gfar_detect_errata(priv);
1334
Claudiu Manoil20862782014-02-17 12:53:14 +02001335 /* Set the dev->base_addr to the gfar reg region */
1336 dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
1337
1338 /* Fill in the dev structure */
1339 dev->watchdog_timeo = TX_TIMEOUT;
Jarod Wilson44770e12016-10-17 15:54:17 -04001340 /* MTU range: 50 - 9586 */
Claudiu Manoil20862782014-02-17 12:53:14 +02001341 dev->mtu = 1500;
Jarod Wilson44770e12016-10-17 15:54:17 -04001342 dev->min_mtu = 50;
1343 dev->max_mtu = GFAR_JUMBO_FRAME_SIZE - ETH_HLEN;
Claudiu Manoil20862782014-02-17 12:53:14 +02001344 dev->netdev_ops = &gfar_netdev_ops;
1345 dev->ethtool_ops = &gfar_ethtool_ops;
1346
 1347 /* Register NAPI: one Rx and one Tx handler for each interrupt group */
Claudiu Manoil71ff9e32014-03-07 14:42:46 +02001348 for (i = 0; i < priv->num_grps; i++) {
1349 if (priv->poll_mode == GFAR_SQ_POLLING) {
1350 netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
1351 gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
Eric Dumazetd64b5e82015-11-18 06:31:00 -08001352 netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
Claudiu Manoil71ff9e32014-03-07 14:42:46 +02001353 gfar_poll_tx_sq, 2);
1354 } else {
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02001355 netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
1356 gfar_poll_rx, GFAR_DEV_WEIGHT);
Eric Dumazetd64b5e82015-11-18 06:31:00 -08001357 netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02001358 gfar_poll_tx, 2);
1359 }
1360 }
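	/* gfar_poll_rx_sq/gfar_poll_tx_sq are the single-queue fast paths
	 * (one Rx and one Tx queue per group); gfar_poll_rx/gfar_poll_tx
	 * handle the general multi-queue case.
	 */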
Claudiu Manoil20862782014-02-17 12:53:14 +02001361
1362 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
1363 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
1364 NETIF_F_RXCSUM;
1365 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
1366 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
1367 }
1368
1369 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
1370 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
1371 NETIF_F_HW_VLAN_CTAG_RX;
1372 dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
1373 }
1374
Claudiu Manoil3d23a052015-05-06 18:07:30 +03001375 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1376
Claudiu Manoil20862782014-02-17 12:53:14 +02001377 gfar_init_addr_hash_table(priv);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001378
Zumeng Chen58117672017-12-04 11:22:02 +08001379 /* Insert receive time stamps into the padding alignment bytes,
 1380 * plus 2 bytes of padding to ensure CPU alignment.
1381 */
Claudiu Manoil532c37b2014-02-17 12:53:16 +02001382 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
Zumeng Chen58117672017-12-04 11:22:02 +08001383 priv->padding = 8 + DEFAULT_PADDING;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001384
Manfred Rudigiercc772ab2010-04-08 23:10:03 +00001385 if (dev->features & NETIF_F_IP_CSUM ||
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001386 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
Wu Jiajun-B06378bee9e582012-05-21 23:00:48 +00001387 dev->needed_headroom = GMAC_FCB_LEN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001389 /* Initializing some of the rx/tx queue level parameters */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001390 for (i = 0; i < priv->num_tx_queues; i++) {
1391 priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
1392 priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
1393 priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
1394 priv->tx_queue[i]->txic = DEFAULT_TXIC;
1395 }
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001396
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001397 for (i = 0; i < priv->num_rx_queues; i++) {
1398 priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
1399 priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
1400 priv->rx_queue[i]->rxic = DEFAULT_RXIC;
1401 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001402
Hamish Martin7bff47d2015-12-15 14:14:50 +13001403 /* Always enable rx filer if available */
1404 priv->rx_filer_enable =
1405 (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001406 /* Enable most messages by default */
 1407 priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
Claudiu Manoilb98b8ba2012-09-23 22:39:08 +00001408 /* use priority h/w tx queue scheduling for single queue devices */
1409 if (priv->num_tx_queues == 1)
1410 priv->prio_sched_en = 1;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001411
Claudiu Manoil08511332014-02-24 12:13:45 +02001412 set_bit(GFAR_DOWN, &priv->state);
1413
Claudiu Manoila328ac92014-02-24 12:13:42 +02001414 gfar_hw_init(priv);
Trent Piephod3eab822008-10-02 11:12:24 +00001415
Fabio Estevamd4c642e2014-06-03 19:55:38 -03001416 /* Carrier starts down, phylib will bring it up */
1417 netif_carrier_off(dev);
1418
Linus Torvalds1da177e2005-04-16 15:20:36 -07001419 err = register_netdev(dev);
1420
1421 if (err) {
Joe Perches59deab22011-06-14 08:57:47 +00001422 pr_err("%s: Cannot register net device, aborting\n", dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001423 goto register_fail;
1424 }
1425
Claudiu Manoil3e905b82015-10-05 17:19:59 +03001426 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET)
1427 priv->wol_supported |= GFAR_WOL_MAGIC;
1428
1429 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) &&
1430 priv->rx_filer_enable)
1431 priv->wol_supported |= GFAR_WOL_FILER_UCAST;
1432
1433 device_set_wakeup_capable(&ofdev->dev, priv->wol_supported);
Anton Vorontsov2884e5c2009-02-01 00:52:34 -08001434
Dai Harukic50a5d92008-12-17 16:51:32 -08001435 /* fill out IRQ number and name fields */
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001436 for (i = 0; i < priv->num_grps; i++) {
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001437 struct gfar_priv_grp *grp = &priv->gfargrp[i];
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001438 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001439 sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
Joe Perches0015e552012-03-25 07:10:07 +00001440 dev->name, "_g", '0' + i, "_tx");
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001441 sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
Joe Perches0015e552012-03-25 07:10:07 +00001442 dev->name, "_g", '0' + i, "_rx");
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001443 sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
Joe Perches0015e552012-03-25 07:10:07 +00001444 dev->name, "_g", '0' + i, "_er");
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001445 } else
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001446 strcpy(gfar_irq(grp, TX)->name, dev->name);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001447 }
Dai Harukic50a5d92008-12-17 16:51:32 -08001448
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +00001449 /* Initialize the filer table */
1450 gfar_init_filer_table(priv);
1451
Linus Torvalds1da177e2005-04-16 15:20:36 -07001452 /* Print out the device info */
Joe Perches59deab22011-06-14 08:57:47 +00001453 netdev_info(dev, "mac: %pM\n", dev->dev_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001454
Jan Ceuleers0977f812012-06-05 03:42:12 +00001455 /* Even more device info helps when determining which kernel
1456 * provided which set of benchmarks.
1457 */
Joe Perches59deab22011-06-14 08:57:47 +00001458 netdev_info(dev, "Running with NAPI enabled\n");
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001459 for (i = 0; i < priv->num_rx_queues; i++)
Joe Perches59deab22011-06-14 08:57:47 +00001460 netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
1461 i, priv->rx_queue[i]->rx_ring_size);
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001462 for (i = 0; i < priv->num_tx_queues; i++)
Joe Perches59deab22011-06-14 08:57:47 +00001463 netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
1464 i, priv->tx_queue[i]->tx_ring_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001465
1466 return 0;
1467
1468register_fail:
Johan Hovold42c70042016-11-28 19:25:02 +01001469 if (of_phy_is_fixed_link(np))
1470 of_phy_deregister_fixed_link(np);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001471 unmap_group_regs(priv);
Claudiu Manoil20862782014-02-17 12:53:14 +02001472 gfar_free_rx_queues(priv);
1473 gfar_free_tx_queues(priv);
Uwe Kleine-König888c88b2014-08-07 21:20:12 +02001474 of_node_put(priv->phy_node);
1475 of_node_put(priv->tbi_node);
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001476 free_gfar_dev(priv);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001477 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478}
1479
Grant Likely2dc11582010-08-06 09:25:50 -06001480static int gfar_remove(struct platform_device *ofdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481{
Jingoo Han8513fbd2013-05-23 00:52:31 +00001482 struct gfar_private *priv = platform_get_drvdata(ofdev);
Johan Hovold42c70042016-11-28 19:25:02 +01001483 struct device_node *np = ofdev->dev.of_node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484
Uwe Kleine-König888c88b2014-08-07 21:20:12 +02001485 of_node_put(priv->phy_node);
1486 of_node_put(priv->tbi_node);
Grant Likelyfe192a42009-04-25 12:53:12 +00001487
David S. Millerd9d8e042009-09-06 01:41:02 -07001488 unregister_netdev(priv->ndev);
Johan Hovold42c70042016-11-28 19:25:02 +01001489
1490 if (of_phy_is_fixed_link(np))
1491 of_phy_deregister_fixed_link(np);
1492
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001493 unmap_group_regs(priv);
Claudiu Manoil20862782014-02-17 12:53:14 +02001494 gfar_free_rx_queues(priv);
1495 gfar_free_tx_queues(priv);
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001496 free_gfar_dev(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001497
1498 return 0;
1499}
1500
Scott Woodd87eb122008-07-11 18:04:45 -05001501#ifdef CONFIG_PM
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001502
Claudiu Manoil3e905b82015-10-05 17:19:59 +03001503static void __gfar_filer_disable(struct gfar_private *priv)
1504{
1505 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1506 u32 temp;
1507
1508 temp = gfar_read(&regs->rctrl);
1509 temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT);
1510 gfar_write(&regs->rctrl, temp);
1511}
1512
1513static void __gfar_filer_enable(struct gfar_private *priv)
1514{
1515 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1516 u32 temp;
1517
1518 temp = gfar_read(&regs->rctrl);
1519 temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
1520 gfar_write(&regs->rctrl, temp);
1521}
1522
1523/* Filer rules implementing wol capabilities */
1524static void gfar_filer_config_wol(struct gfar_private *priv)
1525{
1526 unsigned int i;
1527 u32 rqfcr;
1528
1529 __gfar_filer_disable(priv);
1530
1531 /* clear the filer table, reject any packet by default */
1532 rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH;
1533 for (i = 0; i <= MAX_FILER_IDX; i++)
1534 gfar_write_filer(priv, i, rqfcr, 0);
1535
1536 i = 0;
1537 if (priv->wol_opts & GFAR_WOL_FILER_UCAST) {
1538 /* unicast packet, accept it */
1539 struct net_device *ndev = priv->ndev;
1540 /* get the default rx queue index */
1541 u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex;
1542 u32 dest_mac_addr = (ndev->dev_addr[0] << 16) |
1543 (ndev->dev_addr[1] << 8) |
1544 ndev->dev_addr[2];
1545
1546 rqfcr = (qindex << 10) | RQFCR_AND |
1547 RQFCR_CMP_EXACT | RQFCR_PID_DAH;
1548
1549 gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
1550
1551 dest_mac_addr = (ndev->dev_addr[3] << 16) |
1552 (ndev->dev_addr[4] << 8) |
1553 ndev->dev_addr[5];
1554 rqfcr = (qindex << 10) | RQFCR_GPI |
1555 RQFCR_CMP_EXACT | RQFCR_PID_DAL;
1556 gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
1557 }
1558
1559 __gfar_filer_enable(priv);
1560}
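/* The two entries above form one rule chained by RQFCR_AND: the first
 * matches the upper three bytes of the station address (RQFCR_PID_DAH),
 * the second matches the lower three (RQFCR_PID_DAL) and raises the
 * filer general purpose interrupt (RQFCR_GPI) that serves as the wake
 * event (see gfar_start_wol_filer() below, which unmasks IMASK_FGPI).
 */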
1561
1562static void gfar_filer_restore_table(struct gfar_private *priv)
1563{
1564 u32 rqfcr, rqfpr;
1565 unsigned int i;
1566
1567 __gfar_filer_disable(priv);
1568
1569 for (i = 0; i <= MAX_FILER_IDX; i++) {
1570 rqfcr = priv->ftp_rqfcr[i];
1571 rqfpr = priv->ftp_rqfpr[i];
1572 gfar_write_filer(priv, i, rqfcr, rqfpr);
1573 }
1574
1575 __gfar_filer_enable(priv);
1576}
1577
1578/* gfar_start() for Rx only and with the FGPI filer interrupt enabled */
1579static void gfar_start_wol_filer(struct gfar_private *priv)
1580{
1581 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1582 u32 tempval;
1583 int i = 0;
1584
1585 /* Enable Rx hw queues */
1586 gfar_write(&regs->rqueue, priv->rqueue);
1587
1588 /* Initialize DMACTRL to have WWR and WOP */
1589 tempval = gfar_read(&regs->dmactrl);
1590 tempval |= DMACTRL_INIT_SETTINGS;
1591 gfar_write(&regs->dmactrl, tempval);
1592
1593 /* Make sure we aren't stopped */
1594 tempval = gfar_read(&regs->dmactrl);
1595 tempval &= ~DMACTRL_GRS;
1596 gfar_write(&regs->dmactrl, tempval);
1597
1598 for (i = 0; i < priv->num_grps; i++) {
1599 regs = priv->gfargrp[i].regs;
1600 /* Clear RHLT, so that the DMA starts polling now */
1601 gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
1602 /* enable the Filer General Purpose Interrupt */
1603 gfar_write(&regs->imask, IMASK_FGPI);
1604 }
1605
1606 /* Enable Rx DMA */
1607 tempval = gfar_read(&regs->maccfg1);
1608 tempval |= MACCFG1_RX_EN;
1609 gfar_write(&regs->maccfg1, tempval);
1610}
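/* Rx-only variant of gfar_start(): the Tx queues stay disabled (tqueue
 * was cleared by gfar_halt()) and only IMASK_FGPI is unmasked, so a
 * filer match is the only event that can interrupt the sleeping system.
 */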
1611
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001612static int gfar_suspend(struct device *dev)
Scott Woodd87eb122008-07-11 18:04:45 -05001613{
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001614 struct gfar_private *priv = dev_get_drvdata(dev);
1615 struct net_device *ndev = priv->ndev;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001616 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Scott Woodd87eb122008-07-11 18:04:45 -05001617 u32 tempval;
Claudiu Manoil3e905b82015-10-05 17:19:59 +03001618 u16 wol = priv->wol_opts;
Scott Woodd87eb122008-07-11 18:04:45 -05001619
Claudiu Manoil614b4242015-07-31 18:38:32 +03001620 if (!netif_running(ndev))
1621 return 0;
1622
1623 disable_napi(priv);
1624 netif_tx_lock(ndev);
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001625 netif_device_detach(ndev);
Claudiu Manoil614b4242015-07-31 18:38:32 +03001626 netif_tx_unlock(ndev);
Scott Woodd87eb122008-07-11 18:04:45 -05001627
Claudiu Manoil614b4242015-07-31 18:38:32 +03001628 gfar_halt(priv);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001629
Claudiu Manoil3e905b82015-10-05 17:19:59 +03001630 if (wol & GFAR_WOL_MAGIC) {
Claudiu Manoil614b4242015-07-31 18:38:32 +03001631 /* Enable interrupt on Magic Packet */
1632 gfar_write(&regs->imask, IMASK_MAG);
Scott Woodd87eb122008-07-11 18:04:45 -05001633
Claudiu Manoil614b4242015-07-31 18:38:32 +03001634 /* Enable Magic Packet mode */
1635 tempval = gfar_read(&regs->maccfg2);
1636 tempval |= MACCFG2_MPEN;
1637 gfar_write(&regs->maccfg2, tempval);
Scott Woodd87eb122008-07-11 18:04:45 -05001638
Claudiu Manoil614b4242015-07-31 18:38:32 +03001639 /* re-enable the Rx block */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001640 tempval = gfar_read(&regs->maccfg1);
Claudiu Manoil614b4242015-07-31 18:38:32 +03001641 tempval |= MACCFG1_RX_EN;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001642 gfar_write(&regs->maccfg1, tempval);
Scott Woodd87eb122008-07-11 18:04:45 -05001643
Claudiu Manoil3e905b82015-10-05 17:19:59 +03001644 } else if (wol & GFAR_WOL_FILER_UCAST) {
1645 gfar_filer_config_wol(priv);
1646 gfar_start_wol_filer(priv);
1647
Claudiu Manoil614b4242015-07-31 18:38:32 +03001648 } else {
Philippe Reynes4c4a6b02016-05-16 01:30:08 +02001649 phy_stop(ndev->phydev);
Scott Woodd87eb122008-07-11 18:04:45 -05001650 }
1651
1652 return 0;
1653}
1654
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001655static int gfar_resume(struct device *dev)
Scott Woodd87eb122008-07-11 18:04:45 -05001656{
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001657 struct gfar_private *priv = dev_get_drvdata(dev);
1658 struct net_device *ndev = priv->ndev;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001659 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Scott Woodd87eb122008-07-11 18:04:45 -05001660 u32 tempval;
Claudiu Manoil3e905b82015-10-05 17:19:59 +03001661 u16 wol = priv->wol_opts;
Scott Woodd87eb122008-07-11 18:04:45 -05001662
Claudiu Manoil614b4242015-07-31 18:38:32 +03001663 if (!netif_running(ndev))
Scott Woodd87eb122008-07-11 18:04:45 -05001664 return 0;
Scott Woodd87eb122008-07-11 18:04:45 -05001665
Claudiu Manoil3e905b82015-10-05 17:19:59 +03001666 if (wol & GFAR_WOL_MAGIC) {
Claudiu Manoil614b4242015-07-31 18:38:32 +03001667 /* Disable Magic Packet mode */
1668 tempval = gfar_read(&regs->maccfg2);
1669 tempval &= ~MACCFG2_MPEN;
1670 gfar_write(&regs->maccfg2, tempval);
Claudiu Manoil3e905b82015-10-05 17:19:59 +03001671
1672 } else if (wol & GFAR_WOL_FILER_UCAST) {
1673 /* need to stop rx only, tx is already down */
1674 gfar_halt(priv);
1675 gfar_filer_restore_table(priv);
1676
Claudiu Manoil614b4242015-07-31 18:38:32 +03001677 } else {
Philippe Reynes4c4a6b02016-05-16 01:30:08 +02001678 phy_start(ndev->phydev);
Claudiu Manoil614b4242015-07-31 18:38:32 +03001679 }
Scott Woodd87eb122008-07-11 18:04:45 -05001680
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001681 gfar_start(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001682
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001683 netif_device_attach(ndev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001684 enable_napi(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001685
1686 return 0;
1687}
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001688
1689static int gfar_restore(struct device *dev)
1690{
1691 struct gfar_private *priv = dev_get_drvdata(dev);
1692 struct net_device *ndev = priv->ndev;
1693
Wang Dongsheng103cdd12012-11-09 04:43:51 +00001694 if (!netif_running(ndev)) {
1695 netif_device_attach(ndev);
1696
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001697 return 0;
Wang Dongsheng103cdd12012-11-09 04:43:51 +00001698 }
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001699
Claudiu Manoil76f31e82015-07-13 16:22:03 +03001700 gfar_init_bds(ndev);
Claudiu Manoil1eb8f7a2012-11-08 22:11:41 +00001701
Claudiu Manoila328ac92014-02-24 12:13:42 +02001702 gfar_mac_reset(priv);
1703
1704 gfar_init_tx_rx_base(priv);
1705
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001706 gfar_start(priv);
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001707
1708 priv->oldlink = 0;
1709 priv->oldspeed = 0;
1710 priv->oldduplex = -1;
1711
Philippe Reynes4c4a6b02016-05-16 01:30:08 +02001712 if (ndev->phydev)
1713 phy_start(ndev->phydev);
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001714
1715 netif_device_attach(ndev);
Anton Vorontsov5ea681d2009-11-10 14:11:05 +00001716 enable_napi(priv);
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001717
1718 return 0;
1719}
1720
Arvind Yadavee272442017-06-29 11:26:06 +05301721static const struct dev_pm_ops gfar_pm_ops = {
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001722 .suspend = gfar_suspend,
1723 .resume = gfar_resume,
1724 .freeze = gfar_suspend,
1725 .thaw = gfar_resume,
1726 .restore = gfar_restore,
1727};
1728
1729#define GFAR_PM_OPS (&gfar_pm_ops)
1730
Scott Woodd87eb122008-07-11 18:04:45 -05001731#else
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001732
1733#define GFAR_PM_OPS NULL
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001734
Scott Woodd87eb122008-07-11 18:04:45 -05001735#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001736
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001737/* Reads the controller's registers to determine what interface
1738 * connects it to the PHY.
1739 */
1740static phy_interface_t gfar_get_interface(struct net_device *dev)
1741{
1742 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001743 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001744 u32 ecntrl;
1745
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001746 ecntrl = gfar_read(&regs->ecntrl);
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001747
1748 if (ecntrl & ECNTRL_SGMII_MODE)
1749 return PHY_INTERFACE_MODE_SGMII;
1750
1751 if (ecntrl & ECNTRL_TBI_MODE) {
1752 if (ecntrl & ECNTRL_REDUCED_MODE)
1753 return PHY_INTERFACE_MODE_RTBI;
1754 else
1755 return PHY_INTERFACE_MODE_TBI;
1756 }
1757
1758 if (ecntrl & ECNTRL_REDUCED_MODE) {
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001759 if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001760 return PHY_INTERFACE_MODE_RMII;
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001761 } else {
Andy Flemingb31a1d82008-12-16 15:29:15 -08001763 phy_interface_t interface = priv->interface;
Andy Fleming7132ab72007-07-11 11:43:07 -05001764
Jan Ceuleers0977f812012-06-05 03:42:12 +00001765 /* This isn't autodetected right now, so it must
Andy Fleming7132ab72007-07-11 11:43:07 -05001766 * be set by the device tree or platform code.
1767 */
1768 if (interface == PHY_INTERFACE_MODE_RGMII_ID)
1769 return PHY_INTERFACE_MODE_RGMII_ID;
1770
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001771 return PHY_INTERFACE_MODE_RGMII;
Andy Fleming7132ab72007-07-11 11:43:07 -05001772 }
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001773 }
1774
Andy Flemingb31a1d82008-12-16 15:29:15 -08001775 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001776 return PHY_INTERFACE_MODE_GMII;
1777
1778 return PHY_INTERFACE_MODE_MII;
1779}
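/* Decode order used above: SGMII first, then TBI/RTBI, then the
 * reduced-pin modes (RMII, RGMII/RGMII-ID), and finally GMII vs. MII
 * based on the gigabit device flag.
 */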
1780
1781
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001782/* Initializes driver's PHY state, and attaches to the PHY.
1783 * Returns 0 on success.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784 */
1785static int init_phy(struct net_device *dev)
1786{
Andrew Lunn3c1bcc82018-11-10 23:43:33 +01001787 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788 struct gfar_private *priv = netdev_priv(dev);
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001789 phy_interface_t interface;
Philippe Reynes4c4a6b02016-05-16 01:30:08 +02001790 struct phy_device *phydev;
Claudiu Manoilb6b5e8a2017-12-07 18:44:23 +02001791 struct ethtool_eee edata;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001792
Andrew Lunn3c1bcc82018-11-10 23:43:33 +01001793 linkmode_set_bit_array(phy_10_100_features_array,
1794 ARRAY_SIZE(phy_10_100_features_array),
1795 mask);
1796 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
1797 linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
1798 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
1799 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);
1800
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801 priv->oldlink = 0;
1802 priv->oldspeed = 0;
1803 priv->oldduplex = -1;
1804
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001805 interface = gfar_get_interface(dev);
1806
Philippe Reynes4c4a6b02016-05-16 01:30:08 +02001807 phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
1808 interface);
1809 if (!phydev) {
Anton Vorontsov1db780f2009-07-16 21:31:42 +00001810 dev_err(&dev->dev, "could not attach to PHY\n");
1811 return -ENODEV;
Grant Likelyfe192a42009-04-25 12:53:12 +00001812 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001813
Kapil Junejad3c12872007-05-11 18:25:11 -05001814 if (interface == PHY_INTERFACE_MODE_SGMII)
1815 gfar_configure_serdes(dev);
1816
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001817 /* Remove any features not supported by the controller */
Andrew Lunn3c1bcc82018-11-10 23:43:33 +01001818 linkmode_and(phydev->supported, phydev->supported, mask);
1819 linkmode_copy(phydev->advertising, phydev->supported);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001820
Andrew Lunnaf8d9bb2018-09-12 01:53:15 +02001821 /* Add support for flow control */
1822 phy_support_asym_pause(phydev);
Pavaluca Matei-B46610cf987af2014-10-27 10:42:42 +02001823
Claudiu Manoilb6b5e8a2017-12-07 18:44:23 +02001824 /* disable EEE autoneg, EEE not supported by eTSEC */
1825 memset(&edata, 0, sizeof(struct ethtool_eee));
1826 phy_ethtool_set_eee(phydev, &edata);
1827
Linus Torvalds1da177e2005-04-16 15:20:36 -07001828 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829}
1830
Jan Ceuleers0977f812012-06-05 03:42:12 +00001831/* Initialize TBI PHY interface for communicating with the
Paul Gortmakerd0313582008-04-17 00:08:10 -04001832 * SERDES lynx PHY on the chip. We communicate with this PHY
1833 * through the MDIO bus on each controller, treating it as a
1834 * "normal" PHY at the address found in the TBIPA register. We assume
1835 * that the TBIPA register is valid. Either the MDIO bus code will set
1836 * it to a value that doesn't conflict with other PHYs on the bus, or the
1837 * value doesn't matter, as there are no other PHYs on the bus.
1838 */
Kapil Junejad3c12872007-05-11 18:25:11 -05001839static void gfar_configure_serdes(struct net_device *dev)
1840{
1841 struct gfar_private *priv = netdev_priv(dev);
Grant Likelyfe192a42009-04-25 12:53:12 +00001842 struct phy_device *tbiphy;
Trent Piephoc1324192008-10-30 18:17:06 -07001843
Grant Likelyfe192a42009-04-25 12:53:12 +00001844 if (!priv->tbi_node) {
1845 dev_warn(&dev->dev, "error: SGMII mode requires that the "
1846 "device tree specify a tbi-handle\n");
1847 return;
1848 }
1849
1850 tbiphy = of_phy_find_device(priv->tbi_node);
1851 if (!tbiphy) {
1852 dev_err(&dev->dev, "error: Could not get TBI device\n");
Andy Flemingb31a1d82008-12-16 15:29:15 -08001853 return;
1854 }
Kapil Junejad3c12872007-05-11 18:25:11 -05001855
Jan Ceuleers0977f812012-06-05 03:42:12 +00001856 /* If the link is already up, we must already be ok, and don't need to
Trent Piephobdb59f92008-10-30 18:17:07 -07001857 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
1858 * everything for us? Resetting it takes the link down and requires
1859 * several seconds for it to come back.
1860 */
Russell King38737e42015-09-24 20:36:28 +01001861 if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
Andrew Lunne5a03bf2016-01-06 20:11:16 +01001862 put_device(&tbiphy->mdio.dev);
Andy Flemingb31a1d82008-12-16 15:29:15 -08001863 return;
Russell King38737e42015-09-24 20:36:28 +01001864 }
Kapil Junejad3c12872007-05-11 18:25:11 -05001865
Paul Gortmakerd0313582008-04-17 00:08:10 -04001866 /* Single clk mode, mii mode off(for serdes communication) */
Grant Likelyfe192a42009-04-25 12:53:12 +00001867 phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
Kapil Junejad3c12872007-05-11 18:25:11 -05001868
Grant Likelyfe192a42009-04-25 12:53:12 +00001869 phy_write(tbiphy, MII_ADVERTISE,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001870 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
1871 ADVERTISE_1000XPSE_ASYM);
Kapil Junejad3c12872007-05-11 18:25:11 -05001872
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001873 phy_write(tbiphy, MII_BMCR,
1874 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
1875 BMCR_SPEED1000);
Russell King04d53b22015-09-24 20:36:18 +01001876
Andrew Lunne5a03bf2016-01-06 20:11:16 +01001877 put_device(&tbiphy->mdio.dev);
Kapil Junejad3c12872007-05-11 18:25:11 -05001878}
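/* A sketch of what the writes above request, assuming standard
 * 1000BASE-X register semantics: select the single clock, advertise
 * full duplex plus symmetric/asymmetric pause, then restart
 * autonegotiation at 1000 Mb/s full duplex via BMCR.
 */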
1879
Anton Vorontsov511d9342010-06-30 06:39:15 +00001880static int __gfar_is_rx_idle(struct gfar_private *priv)
1881{
1882 u32 res;
1883
Jan Ceuleers0977f812012-06-05 03:42:12 +00001884 /* Normally TSEC should not hang on GRS commands, so we should
Anton Vorontsov511d9342010-06-30 06:39:15 +00001885 * actually wait for IEVENT_GRSC flag.
1886 */
Claudiu Manoilad3660c2013-10-09 20:20:40 +03001887 if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
Anton Vorontsov511d9342010-06-30 06:39:15 +00001888 return 0;
1889
Jan Ceuleers0977f812012-06-05 03:42:12 +00001890 /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
Anton Vorontsov511d9342010-06-30 06:39:15 +00001891 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
1892 * and the Rx can be safely reset.
1893 */
1894 res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
1895 res &= 0x7f807f80;
1896 if ((res & 0xffff) == (res >> 16))
1897 return 1;
1898
1899 return 0;
1900}
Kumar Gala0bbaf062005-06-20 10:54:21 -05001901
1902/* Halt the receive and transmit queues */
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001903static void gfar_halt_nodisable(struct gfar_private *priv)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001904{
Claudiu Manoilefeddce2014-02-17 12:53:17 +02001905 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001906 u32 tempval;
Claudiu Manoila4feee82014-10-07 10:44:34 +03001907 unsigned int timeout;
1908 int stopped;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001909
Claudiu Manoilefeddce2014-02-17 12:53:17 +02001910 gfar_ints_disable(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001911
Claudiu Manoila4feee82014-10-07 10:44:34 +03001912 if (gfar_is_dma_stopped(priv))
1913 return;
1914
Linus Torvalds1da177e2005-04-16 15:20:36 -07001915 /* Stop the DMA, and wait for it to stop */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001916 tempval = gfar_read(&regs->dmactrl);
Claudiu Manoila4feee82014-10-07 10:44:34 +03001917 tempval |= (DMACTRL_GRS | DMACTRL_GTS);
1918 gfar_write(&regs->dmactrl, tempval);
Anton Vorontsov511d9342010-06-30 06:39:15 +00001919
Claudiu Manoila4feee82014-10-07 10:44:34 +03001920retry:
1921 timeout = 1000;
1922 while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
1923 cpu_relax();
1924 timeout--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001925 }
Claudiu Manoila4feee82014-10-07 10:44:34 +03001926
1927 if (!timeout)
1928 stopped = gfar_is_dma_stopped(priv);
1929
1930 if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
1931 !__gfar_is_rx_idle(priv))
1932 goto retry;
Scott Woodd87eb122008-07-11 18:04:45 -05001933}
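/* The retry loop above exists for the GFAR_ERRATA_A002 case:
 * __gfar_is_rx_idle() only reports idle once the Rx side can be reset
 * safely, so the 1000-iteration timeout is re-armed rather than
 * giving up on a busy receiver.
 */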
Scott Woodd87eb122008-07-11 18:04:45 -05001934
1935/* Halt the receive and transmit queues */
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001936void gfar_halt(struct gfar_private *priv)
Scott Woodd87eb122008-07-11 18:04:45 -05001937{
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001938 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Scott Woodd87eb122008-07-11 18:04:45 -05001939 u32 tempval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001941 /* Disable the Rx/Tx hw queues */
1942 gfar_write(&regs->rqueue, 0);
1943 gfar_write(&regs->tqueue, 0);
Scott Wood2a54adc2008-08-12 15:10:46 -05001944
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001945 mdelay(10);
1946
1947 gfar_halt_nodisable(priv);
1948
1949 /* Disable Rx/Tx DMA */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001950 tempval = gfar_read(&regs->maccfg1);
1951 tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
1952 gfar_write(&regs->maccfg1, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001953}
1954
1955void stop_gfar(struct net_device *dev)
1956{
1957 struct gfar_private *priv = netdev_priv(dev);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001958
Claudiu Manoil08511332014-02-24 12:13:45 +02001959 netif_tx_stop_all_queues(dev);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001960
Peter Zijlstra4e857c52014-03-17 18:06:10 +01001961 smp_mb__before_atomic();
Claudiu Manoil08511332014-02-24 12:13:45 +02001962 set_bit(GFAR_DOWN, &priv->state);
Peter Zijlstra4e857c52014-03-17 18:06:10 +01001963 smp_mb__after_atomic();
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001964
Claudiu Manoil08511332014-02-24 12:13:45 +02001965 disable_napi(priv);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001966
Claudiu Manoil08511332014-02-24 12:13:45 +02001967 /* disable ints and gracefully shut down Rx/Tx DMA */
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001968 gfar_halt(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969
Philippe Reynes4c4a6b02016-05-16 01:30:08 +02001970 phy_stop(dev->phydev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971
Linus Torvalds1da177e2005-04-16 15:20:36 -07001972 free_skb_resources(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973}
1974
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001975static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001976{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977 struct txbd8 *txbdp;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001978 struct gfar_private *priv = netdev_priv(tx_queue->dev);
Dai Haruki4669bc92008-12-17 16:51:04 -08001979 int i, j;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001981 txbdp = tx_queue->tx_bd_base;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001982
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001983 for (i = 0; i < tx_queue->tx_ring_size; i++) {
1984 if (!tx_queue->tx_skbuff[i])
Dai Haruki4669bc92008-12-17 16:51:04 -08001985 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986
Claudiu Manoila7312d52015-03-13 10:36:28 +02001987 dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
1988 be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
Dai Haruki4669bc92008-12-17 16:51:04 -08001989 txbdp->lstatus = 0;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001990 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001991 j++) {
Dai Haruki4669bc92008-12-17 16:51:04 -08001992 txbdp++;
Claudiu Manoila7312d52015-03-13 10:36:28 +02001993 dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
1994 be16_to_cpu(txbdp->length),
1995 DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996 }
Andy Flemingad5da7a2008-05-07 13:20:55 -05001997 txbdp++;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001998 dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1999 tx_queue->tx_skbuff[i] = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002000 }
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002001 kfree(tx_queue->tx_skbuff);
Claudiu Manoil1eb8f7a2012-11-08 22:11:41 +00002002 tx_queue->tx_skbuff = NULL;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002003}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002004
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002005static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
2006{
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002007 int i;
2008
Claudiu Manoil75354142015-07-13 16:22:06 +03002009 struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
2010
2011 if (rx_queue->skb)
2012 dev_kfree_skb(rx_queue->skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002013
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002014 for (i = 0; i < rx_queue->rx_ring_size; i++) {
Claudiu Manoil75354142015-07-13 16:22:06 +03002015 struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
2016
Anton Vorontsove69edd22009-10-12 06:00:30 +00002017 rxbdp->lstatus = 0;
2018 rxbdp->bufPtr = 0;
2019 rxbdp++;
Claudiu Manoil75354142015-07-13 16:22:06 +03002020
2021 if (!rxb->page)
2022 continue;
2023
Arseny Solokha4af0e5b2017-01-29 19:52:20 +07002024 dma_unmap_page(rx_queue->dev, rxb->dma,
2025 PAGE_SIZE, DMA_FROM_DEVICE);
Claudiu Manoil75354142015-07-13 16:22:06 +03002026 __free_page(rxb->page);
2027
2028 rxb->page = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029 }
Claudiu Manoil75354142015-07-13 16:22:06 +03002030
2031 kfree(rx_queue->rx_buff);
2032 rx_queue->rx_buff = NULL;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002033}
Anton Vorontsove69edd22009-10-12 06:00:30 +00002034
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002035/* If there are any tx skbs or rx skbs still around, free them.
Jan Ceuleers0977f812012-06-05 03:42:12 +00002036 * Then free tx_skbuff and rx_skbuff
2037 */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002038static void free_skb_resources(struct gfar_private *priv)
2039{
2040 struct gfar_priv_tx_q *tx_queue = NULL;
2041 struct gfar_priv_rx_q *rx_queue = NULL;
2042 int i;
2043
2044 /* Go through all the buffer descriptors and free their data buffers */
2045 for (i = 0; i < priv->num_tx_queues; i++) {
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002046 struct netdev_queue *txq;
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002047
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002048 tx_queue = priv->tx_queue[i];
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002049 txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002050 if (tx_queue->tx_skbuff)
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002051 free_skb_tx_queue(tx_queue);
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002052 netdev_tx_reset_queue(txq);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002053 }
2054
2055 for (i = 0; i < priv->num_rx_queues; i++) {
2056 rx_queue = priv->rx_queue[i];
Claudiu Manoil75354142015-07-13 16:22:06 +03002057 if (rx_queue->rx_buff)
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002058 free_skb_rx_queue(rx_queue);
2059 }
2060
Claudiu Manoil369ec162013-02-14 05:00:02 +00002061 dma_free_coherent(priv->dev,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002062 sizeof(struct txbd8) * priv->total_tx_ring_size +
2063 sizeof(struct rxbd8) * priv->total_rx_ring_size,
2064 priv->tx_queue[0]->tx_bd_base,
2065 priv->tx_queue[0]->tx_bd_dma_base);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002066}
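/* All Tx and Rx descriptor rings live in a single DMA-coherent block
 * whose CPU/DMA addresses are kept in tx_queue[0]->tx_bd_base and
 * tx_bd_dma_base, so one dma_free_coherent() call sized for
 * total_tx_ring_size + total_rx_ring_size releases every ring.
 */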
2067
Claudiu Manoilc10650b2014-02-17 12:53:18 +02002068void gfar_start(struct gfar_private *priv)
Kumar Gala0bbaf062005-06-20 10:54:21 -05002069{
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002070 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002071 u32 tempval;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002072 int i = 0;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002073
Claudiu Manoilc10650b2014-02-17 12:53:18 +02002074 /* Enable Rx/Tx hw queues */
2075 gfar_write(&regs->rqueue, priv->rqueue);
2076 gfar_write(&regs->tqueue, priv->tqueue);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002077
2078 /* Initialize DMACTRL to have WWR and WOP */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002079 tempval = gfar_read(&regs->dmactrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002080 tempval |= DMACTRL_INIT_SETTINGS;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002081 gfar_write(&regs->dmactrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002082
Kumar Gala0bbaf062005-06-20 10:54:21 -05002083 /* Make sure we aren't stopped */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002084 tempval = gfar_read(&regs->dmactrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002085 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002086 gfar_write(&regs->dmactrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002087
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002088 for (i = 0; i < priv->num_grps; i++) {
2089 regs = priv->gfargrp[i].regs;
2090 /* Clear THLT/RHLT, so that the DMA starts polling now */
2091 gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
2092 gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002093 }
Dai Haruki12dea572008-12-16 15:30:20 -08002094
Claudiu Manoilc10650b2014-02-17 12:53:18 +02002095 /* Enable Rx/Tx DMA */
2096 tempval = gfar_read(&regs->maccfg1);
2097 tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
2098 gfar_write(&regs->maccfg1, tempval);
2099
Claudiu Manoilefeddce2014-02-17 12:53:17 +02002100 gfar_ints_enable(priv);
2101
Florian Westphal860e9532016-05-03 16:33:13 +02002102 netif_trans_update(priv->ndev); /* prevent tx timeout */
Kumar Gala0bbaf062005-06-20 10:54:21 -05002103}
2104
Claudiu Manoil80ec3962014-02-24 12:13:44 +02002105static void free_grp_irqs(struct gfar_priv_grp *grp)
2106{
2107 free_irq(gfar_irq(grp, TX)->irq, grp);
2108 free_irq(gfar_irq(grp, RX)->irq, grp);
2109 free_irq(gfar_irq(grp, ER)->irq, grp);
2110}
2111
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002112static int register_grp_irqs(struct gfar_priv_grp *grp)
2113{
2114 struct gfar_private *priv = grp->priv;
2115 struct net_device *dev = priv->ndev;
Anton Vorontsovccc05c62009-10-12 06:00:26 +00002116 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117
Linus Torvalds1da177e2005-04-16 15:20:36 -07002118 /* If the device has multiple interrupts, register for
Jan Ceuleers0977f812012-06-05 03:42:12 +00002119 * them. Otherwise, only register for the one.
2120 */
Andy Flemingb31a1d82008-12-16 15:29:15 -08002121 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05002122 /* Install our interrupt handlers for Error,
Jan Ceuleers0977f812012-06-05 03:42:12 +00002123 * Transmit, and Receive
2124 */
Sudeep Hollad5b8d642015-09-21 16:47:09 +01002125 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002126 gfar_irq(grp, ER)->name, grp);
2127 if (err < 0) {
Joe Perches59deab22011-06-14 08:57:47 +00002128 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002129 gfar_irq(grp, ER)->irq);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002130
Julia Lawall2145f1a2010-08-05 10:26:20 +00002131 goto err_irq_fail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002132 }
Sudeep Hollad5b8d642015-09-21 16:47:09 +01002133 enable_irq_wake(gfar_irq(grp, ER)->irq);
2134
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002135 err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
2136 gfar_irq(grp, TX)->name, grp);
2137 if (err < 0) {
Joe Perches59deab22011-06-14 08:57:47 +00002138 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002139 gfar_irq(grp, TX)->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002140 goto tx_irq_fail;
2141 }
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002142 err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
2143 gfar_irq(grp, RX)->name, grp);
2144 if (err < 0) {
Joe Perches59deab22011-06-14 08:57:47 +00002145 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002146 gfar_irq(grp, RX)->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002147 goto rx_irq_fail;
2148 }
Claudiu Manoil3e905b82015-10-05 17:19:59 +03002149 enable_irq_wake(gfar_irq(grp, RX)->irq);
2150
Linus Torvalds1da177e2005-04-16 15:20:36 -07002151 } else {
Sudeep Hollad5b8d642015-09-21 16:47:09 +01002152 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002153 gfar_irq(grp, TX)->name, grp);
2154 if (err < 0) {
Joe Perches59deab22011-06-14 08:57:47 +00002155 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002156 gfar_irq(grp, TX)->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002157 goto err_irq_fail;
2158 }
Sudeep Hollad5b8d642015-09-21 16:47:09 +01002159 enable_irq_wake(gfar_irq(grp, TX)->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160 }
2161
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002162 return 0;
2163
2164rx_irq_fail:
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002165 free_irq(gfar_irq(grp, TX)->irq, grp);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002166tx_irq_fail:
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002167 free_irq(gfar_irq(grp, ER)->irq, grp);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002168err_irq_fail:
2169 return err;
2170
2171}
2172
Claudiu Manoil80ec3962014-02-24 12:13:44 +02002173static void gfar_free_irq(struct gfar_private *priv)
2174{
2175 int i;
2176
2177 /* Free the IRQs */
2178 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2179 for (i = 0; i < priv->num_grps; i++)
2180 free_grp_irqs(&priv->gfargrp[i]);
2181 } else {
2182 for (i = 0; i < priv->num_grps; i++)
2183 free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
2184 &priv->gfargrp[i]);
2185 }
2186}
2187
2188static int gfar_request_irq(struct gfar_private *priv)
2189{
2190 int err, i, j;
2191
2192 for (i = 0; i < priv->num_grps; i++) {
2193 err = register_grp_irqs(&priv->gfargrp[i]);
2194 if (err) {
2195 for (j = 0; j < i; j++)
2196 free_grp_irqs(&priv->gfargrp[j]);
2197 return err;
2198 }
2199 }
2200
2201 return 0;
2202}
2203
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002204/* Bring the controller up and running */
2205int startup_gfar(struct net_device *ndev)
2206{
2207 struct gfar_private *priv = netdev_priv(ndev);
Claudiu Manoil80ec3962014-02-24 12:13:44 +02002208 int err;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002209
Claudiu Manoila328ac92014-02-24 12:13:42 +02002210 gfar_mac_reset(priv);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002211
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002212 err = gfar_alloc_skb_resources(ndev);
2213 if (err)
2214 return err;
2215
Claudiu Manoila328ac92014-02-24 12:13:42 +02002216 gfar_init_tx_rx_base(priv);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002217
Peter Zijlstra4e857c52014-03-17 18:06:10 +01002218 smp_mb__before_atomic();
Claudiu Manoil08511332014-02-24 12:13:45 +02002219 clear_bit(GFAR_DOWN, &priv->state);
Peter Zijlstra4e857c52014-03-17 18:06:10 +01002220 smp_mb__after_atomic();
Claudiu Manoil08511332014-02-24 12:13:45 +02002221
2222 /* Start Rx/Tx DMA and enable the interrupts */
Claudiu Manoilc10650b2014-02-17 12:53:18 +02002223 gfar_start(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002224
Claudiu Manoil2a4eebf2015-08-13 16:50:37 +03002225 /* force link state update after mac reset */
2226 priv->oldlink = 0;
2227 priv->oldspeed = 0;
2228 priv->oldduplex = -1;
2229
Philippe Reynes4c4a6b02016-05-16 01:30:08 +02002230 phy_start(ndev->phydev);
Anton Vorontsov826aa4a2009-10-12 06:00:34 +00002231
Claudiu Manoil08511332014-02-24 12:13:45 +02002232 enable_napi(priv);
2233
2234 netif_tx_wake_all_queues(ndev);
2235
Linus Torvalds1da177e2005-04-16 15:20:36 -07002236 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002237}
2238
Jan Ceuleers0977f812012-06-05 03:42:12 +00002239/* Called when something needs to use the ethernet device
2240 * Returns 0 for success.
2241 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002242static int gfar_enet_open(struct net_device *dev)
2243{
Li Yang94e8cc32007-10-12 21:53:51 +08002244 struct gfar_private *priv = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002245 int err;
2246
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247 err = init_phy(dev);
Claudiu Manoil08511332014-02-24 12:13:45 +02002248 if (err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002249 return err;
2250
Claudiu Manoil80ec3962014-02-24 12:13:44 +02002251 err = gfar_request_irq(priv);
2252 if (err)
2253 return err;
2254
Linus Torvalds1da177e2005-04-16 15:20:36 -07002255 err = startup_gfar(dev);
Claudiu Manoil08511332014-02-24 12:13:45 +02002256 if (err)
Anton Vorontsovdb0e8e32007-10-17 23:57:46 +04002257 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002258
2259 return err;
2260}
2261
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002262static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
Kumar Gala0bbaf062005-06-20 10:54:21 -05002263{
Johannes Bergd58ff352017-06-16 14:29:23 +02002264 struct txfcb *fcb = skb_push(skb, GMAC_FCB_LEN);
Kumar Gala6c31d552009-04-28 08:04:10 -07002265
2266 memset(fcb, 0, GMAC_FCB_LEN);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002267
Kumar Gala0bbaf062005-06-20 10:54:21 -05002268 return fcb;
2269}
2270
Manfred Rudigier9c4886e2012-01-09 23:26:51 +00002271static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002272 int fcb_length)
Kumar Gala0bbaf062005-06-20 10:54:21 -05002273{
Kumar Gala0bbaf062005-06-20 10:54:21 -05002274 /* If we're here, it's an IP packet with a TCP or UDP
2275 * payload. We set it to checksum, using a pseudo-header
2276 * we provide
2277 */
Jan Ceuleers3a2e16c2012-06-05 03:42:14 +00002278 u8 flags = TXFCB_DEFAULT;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002279
Jan Ceuleers0977f812012-06-05 03:42:12 +00002280 /* Tell the controller what the protocol is
2281 * And provide the already calculated phcs
2282 */
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07002283 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
Andy Fleming7f7f5312005-11-11 12:38:59 -06002284 flags |= TXFCB_UDP;
Claudiu Manoil26eb9372015-03-13 10:36:29 +02002285 fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
Andy Fleming7f7f5312005-11-11 12:38:59 -06002286 } else
Claudiu Manoil26eb9372015-03-13 10:36:29 +02002287 fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002288
2289 /* l3os is the distance between the start of the
2290 * frame (skb->data) and the start of the IP hdr.
2291 * l4os is the distance between the start of the
Jan Ceuleers0977f812012-06-05 03:42:12 +00002292 * l3 hdr and the l4 hdr
2293 */
Claudiu Manoil26eb9372015-03-13 10:36:29 +02002294 fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
Arnaldo Carvalho de Melocfe1fc72007-03-16 17:26:39 -03002295 fcb->l4os = skb_network_header_len(skb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002296
Andy Fleming7f7f5312005-11-11 12:38:59 -06002297 fcb->flags = flags;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002298}
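/* Worked example, assuming the FCB has already been pushed in front of
 * an untagged IPv4/TCP frame: skb_network_offset() is then
 * ETH_HLEN + fcb_length, so l3os = 14, and l4os is the IP header
 * length, i.e. 20 for an IPv4 header without options.
 */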
2299
Arnd Bergmann278af572016-06-16 15:52:13 +02002300static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
Kumar Gala0bbaf062005-06-20 10:54:21 -05002301{
Andy Fleming7f7f5312005-11-11 12:38:59 -06002302 fcb->flags |= TXFCB_VLN;
Claudiu Manoil26eb9372015-03-13 10:36:29 +02002303 fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
Kumar Gala0bbaf062005-06-20 10:54:21 -05002304}
2305
Dai Haruki4669bc92008-12-17 16:51:04 -08002306static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002307 struct txbd8 *base, int ring_size)
Dai Haruki4669bc92008-12-17 16:51:04 -08002308{
2309 struct txbd8 *new_bd = bdp + stride;
2310
2311 return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
2312}
2313
2314static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002315 int ring_size)
Dai Haruki4669bc92008-12-17 16:51:04 -08002316{
2317 return skip_txbd(bdp, 1, base, ring_size);
2318}
2319
Claudiu Manoil02d88fb2013-08-05 17:20:09 +03002320/* eTSEC12: csum generation not supported for some fcb offsets */
2321static inline bool gfar_csum_errata_12(struct gfar_private *priv,
2322 unsigned long fcb_addr)
2323{
2324 return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
2325 (fcb_addr % 0x20) > 0x18);
2326}
2327
2328/* eTSEC76: csum generation for frames larger than 2500 may
2329 * cause excess delays before start of transmission
2330 */
2331static inline bool gfar_csum_errata_76(struct gfar_private *priv,
2332 unsigned int len)
2333{
2334 return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
2335 (len > 2500));
2336}
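/* When either helper above returns true, hardware checksum generation
 * cannot be used for that frame; the transmit path is then expected to
 * fall back to checksumming in software (e.g. skb_checksum_help())
 * before queueing the frame.
 */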
2337
Jan Ceuleers0977f812012-06-05 03:42:12 +00002338/* This is called by the kernel when a frame is ready for transmission.
 2339 * It is reached via the ndo_start_xmit hook in gfar_netdev_ops.
2340 */
YueHaibing06983aa2018-09-21 10:50:32 +08002341static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002342{
2343 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002344 struct gfar_priv_tx_q *tx_queue = NULL;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002345 struct netdev_queue *txq;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002346 struct gfar __iomem *regs = NULL;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002347 struct txfcb *fcb = NULL;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002348 struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
Dai Haruki5a5efed2008-12-16 15:34:50 -08002349 u32 lstatus;
Claudiu Manoil42f397a2016-02-23 11:48:38 +02002350 skb_frag_t *frag;
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002351 int i, rq = 0;
2352 int do_tstamp, do_csum, do_vlan;
Dai Haruki4669bc92008-12-17 16:51:04 -08002353 u32 bufaddr;
Claudiu Manoil50ad0762013-08-30 15:01:15 +03002354 unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002355
2356 rq = skb->queue_mapping;
2357 tx_queue = priv->tx_queue[rq];
2358 txq = netdev_get_tx_queue(dev, rq);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002359 base = tx_queue->tx_bd_base;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002360 regs = tx_queue->grp->regs;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002361
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002362 do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01002363 do_vlan = skb_vlan_tag_present(skb);
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002364 do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2365 priv->hwts_tx_en;
2366
2367 if (do_csum || do_vlan)
2368 fcb_len = GMAC_FCB_LEN;
2369
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002370 /* check if time stamp should be generated */
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002371 if (unlikely(do_tstamp))
2372 fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
Dai Haruki4669bc92008-12-17 16:51:04 -08002373
Li Yang5b28bea2009-03-27 15:54:30 -07002374 /* make space for additional header when fcb is needed */
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002375 if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002376 struct sk_buff *skb_new;
2377
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002378 skb_new = skb_realloc_headroom(skb, fcb_len);
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002379 if (!skb_new) {
2380 dev->stats.tx_errors++;
Eric W. Biedermanc9974ad2014-03-11 14:20:26 -07002381 dev_kfree_skb_any(skb);
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002382 return NETDEV_TX_OK;
2383 }
Manfred Rudigierdb83d132012-01-09 23:26:50 +00002384
Eric Dumazet313b0372012-07-05 11:45:13 +00002385 if (skb->sk)
2386 skb_set_owner_w(skb_new, skb->sk);
Eric W. Biedermanc9974ad2014-03-11 14:20:26 -07002387 dev_consume_skb_any(skb);
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002388 skb = skb_new;
2389 }
2390
Dai Haruki4669bc92008-12-17 16:51:04 -08002391 /* total number of fragments in the SKB */
2392 nr_frags = skb_shinfo(skb)->nr_frags;
2393
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002394 /* calculate the required number of TxBDs for this skb */
2395 if (unlikely(do_tstamp))
2396 nr_txbds = nr_frags + 2;
2397 else
2398 nr_txbds = nr_frags + 1;
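	/* e.g. a 3-fragment skb occupies 4 BDs (head + frags), or 5 when
	 * the extra timestamp BD is needed
	 */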

	/* check if there is space to queue this packet */
	if (nr_txbds > tx_queue->num_txbdfree) {
		/* no space, stop the queue */
		netif_tx_stop_queue(txq);
		dev->stats.tx_fifo_errors++;
		return NETDEV_TX_BUSY;
	}

	/* Update transmit stats */
	bytes_sent = skb->len;
	tx_queue->stats.tx_bytes += bytes_sent;
	/* keep Tx bytes on wire for BQL accounting */
	GFAR_CB(skb)->bytes_sent = bytes_sent;
	tx_queue->stats.tx_packets++;

	txbdp = txbdp_start = tx_queue->cur_tx;
	lstatus = be32_to_cpu(txbdp->lstatus);

	/* Add TxPAL between FCB and frame if required */
	if (unlikely(do_tstamp)) {
		skb_push(skb, GMAC_TXPAL_LEN);
		memset(skb->data, 0, GMAC_TXPAL_LEN);
	}

	/* Add TxFCB if required */
	if (fcb_len) {
		fcb = gfar_add_fcb(skb);
		lstatus |= BD_LFLAG(TXBD_TOE);
	}

	/* Set up checksumming */
	if (do_csum) {
		gfar_tx_checksum(skb, fcb, fcb_len);

		if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
		    unlikely(gfar_csum_errata_76(priv, skb->len))) {
			__skb_pull(skb, GMAC_FCB_LEN);
			skb_checksum_help(skb);
			if (do_vlan || do_tstamp) {
				/* put back a new fcb for vlan/tstamp TOE */
				fcb = gfar_add_fcb(skb);
			} else {
				/* Tx TOE not used */
				lstatus &= ~(BD_LFLAG(TXBD_TOE));
				fcb = NULL;
			}
		}
	}

	if (do_vlan)
		gfar_tx_vlan(skb, fcb);

	bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
				 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
		goto dma_map_err;

	txbdp_start->bufPtr = cpu_to_be32(bufaddr);

	/* Time stamp insertion requires one additional TxBD */
	if (unlikely(do_tstamp))
		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
						 tx_queue->tx_ring_size);

	if (likely(!nr_frags)) {
		if (likely(!do_tstamp))
			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
	} else {
		u32 lstatus_start = lstatus;

		/* Place the fragment addresses and lengths into the TxBDs */
		frag = &skb_shinfo(skb)->frags[0];
		for (i = 0; i < nr_frags; i++, frag++) {
			unsigned int size;

			/* Point at the next BD, wrapping as needed */
			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);

			size = skb_frag_size(frag);

			lstatus = be32_to_cpu(txbdp->lstatus) | size |
				  BD_LFLAG(TXBD_READY);

			/* Handle the last BD specially */
			if (i == nr_frags - 1)
				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);

			bufaddr = skb_frag_dma_map(priv->dev, frag, 0,
						   size, DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
				goto dma_map_err;

			/* set the TxBD length and buffer pointer */
			txbdp->bufPtr = cpu_to_be32(bufaddr);
			txbdp->lstatus = cpu_to_be32(lstatus);
		}

		lstatus = lstatus_start;
	}

	/* If time stamping is requested one additional TxBD must be set up. The
	 * first TxBD points to the FCB and must have a data length of
	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
	 * the full frame length.
	 */
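	/* Illustration: with timestamping on and skb_headlen() == 84
	 * (24 bytes of FCB + TxPAL followed by 60 frame bytes), the
	 * first TxBD carries only the 8-byte FCB, while txbdp_tstamp
	 * points at skb->data + fcb_len and carries the remaining 60
	 * bytes; the 16-byte TxPAL in between is left to the hardware.
	 */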
	if (unlikely(do_tstamp)) {
		u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);

		bufaddr = be32_to_cpu(txbdp_start->bufPtr);
		bufaddr += fcb_len;

		lstatus_ts |= BD_LFLAG(TXBD_READY) |
			      (skb_headlen(skb) - fcb_len);
		if (!nr_frags)
			lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);

		txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
		txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;

		/* Setup tx hardware time stamping */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		fcb->ptp = 1;
	} else {
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
	}

	netdev_tx_sent_queue(txq, bytes_sent);

	gfar_wmb();

	txbdp_start->lstatus = cpu_to_be32(lstatus);

	gfar_wmb(); /* force lstatus write before tx_skbuff */

	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;

	/* Update the current skb pointer to the next entry we will use
	 * (wrapping if necessary)
	 */
	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
			      TX_RING_MOD_MASK(tx_queue->tx_ring_size);

	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);

	/* We can work in parallel with gfar_clean_tx_ring(), except
	 * when modifying num_txbdfree. Note that we didn't grab the lock
	 * when we were reading the num_txbdfree and checking for available
	 * space, that's because outside of this function it can only grow.
	 */
	spin_lock_bh(&tx_queue->txlock);
	/* reduce TxBD free count */
	tx_queue->num_txbdfree -= (nr_txbds);
	spin_unlock_bh(&tx_queue->txlock);

	/* If the next BD still needs to be cleaned up, then the bds
	 * are full. We need to tell the kernel to stop sending us stuff.
	 */
	if (!tx_queue->num_txbdfree) {
		netif_tx_stop_queue(txq);

		dev->stats.tx_fifo_errors++;
	}

	/* Tell the DMA to go go go */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);

	return NETDEV_TX_OK;

dma_map_err:
	txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
	if (do_tstamp)
		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
	for (i = 0; i < nr_frags; i++) {
		lstatus = be32_to_cpu(txbdp->lstatus);
		if (!(lstatus & BD_LFLAG(TXBD_READY)))
			break;

		lstatus &= ~BD_LFLAG(TXBD_READY);
		txbdp->lstatus = cpu_to_be32(lstatus);
		bufaddr = be32_to_cpu(txbdp->bufPtr);
		dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length),
			       DMA_TO_DEVICE);
		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
	}
	gfar_wmb();
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	cancel_work_sync(&priv->reset_task);
	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(dev->phydev);

	gfar_free_irq(priv);

	return 0;
}

/* Changes the mac address if the controller is not running. */
static int gfar_set_mac_address(struct net_device *dev)
{
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}

static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	struct gfar_private *priv = netdev_priv(dev);

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	if (dev->flags & IFF_UP)
		stop_gfar(dev);

	dev->mtu = new_mtu;

	if (dev->flags & IFF_UP)
		startup_gfar(dev);

	clear_bit_unlock(GFAR_RESETTING, &priv->state);

	return 0;
}

void reset_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	stop_gfar(ndev);
	startup_gfar(ndev);

	clear_bit_unlock(GFAR_RESETTING, &priv->state);
}

/* gfar_reset_task gets scheduled when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures and
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
{
	struct gfar_private *priv = container_of(work, struct gfar_private,
						 reset_task);
	reset_gfar(priv->ndev);
}

static void gfar_timeout(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	dev->stats.tx_errors++;
	schedule_work(&priv->reset_task);
}

/* Interrupt Handler for Transmit complete */
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
{
	struct net_device *dev = tx_queue->dev;
	struct netdev_queue *txq;
	struct gfar_private *priv = netdev_priv(dev);
	struct txbd8 *bdp, *next = NULL;
	struct txbd8 *lbdp = NULL;
	struct txbd8 *base = tx_queue->tx_bd_base;
	struct sk_buff *skb;
	int skb_dirtytx;
	int tx_ring_size = tx_queue->tx_ring_size;
	int frags = 0, nr_txbds = 0;
	int i;
	int howmany = 0;
	int tqi = tx_queue->qindex;
	unsigned int bytes_sent = 0;
	u32 lstatus;
	size_t buflen;

	txq = netdev_get_tx_queue(dev, tqi);
	bdp = tx_queue->dirty_tx;
	skb_dirtytx = tx_queue->skb_dirtytx;

	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {

		frags = skb_shinfo(skb)->nr_frags;

		/* When time stamping, one additional TxBD must be freed.
		 * Also, we need to dma_unmap_single() the TxPAL.
		 */
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
			nr_txbds = frags + 2;
		else
			nr_txbds = frags + 1;

		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);

		lstatus = be32_to_cpu(lbdp->lstatus);

		/* Only clean completed frames */
		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
		    (lstatus & BD_LENGTH_MASK))
			break;

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			next = next_txbd(bdp, base, tx_ring_size);
			buflen = be16_to_cpu(next->length) +
				 GMAC_FCB_LEN + GMAC_TXPAL_LEN;
		} else
			buflen = be16_to_cpu(bdp->length);

		dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
				 buflen, DMA_TO_DEVICE);

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			struct skb_shared_hwtstamps shhwtstamps;
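			/* the controller deposits the Tx timestamp in
			 * the TxPAL area: 16 bytes past the buffer
			 * start, rounded down to an 8-byte boundary
			 */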
			u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
					  ~0x7UL);

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
			skb_tstamp_tx(skb, &shhwtstamps);
			gfar_clear_txbd_status(bdp);
			bdp = next;
		}

		gfar_clear_txbd_status(bdp);
		bdp = next_txbd(bdp, base, tx_ring_size);

		for (i = 0; i < frags; i++) {
			dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr),
				       be16_to_cpu(bdp->length),
				       DMA_TO_DEVICE);
			gfar_clear_txbd_status(bdp);
			bdp = next_txbd(bdp, base, tx_ring_size);
		}

		bytes_sent += GFAR_CB(skb)->bytes_sent;

		dev_kfree_skb_any(skb);

		tx_queue->tx_skbuff[skb_dirtytx] = NULL;

		skb_dirtytx = (skb_dirtytx + 1) &
			      TX_RING_MOD_MASK(tx_ring_size);

		howmany++;
		spin_lock(&tx_queue->txlock);
		tx_queue->num_txbdfree += nr_txbds;
		spin_unlock(&tx_queue->txlock);
	}

	/* If we freed a buffer, we can restart transmission, if necessary */
	if (tx_queue->num_txbdfree &&
	    netif_tx_queue_stopped(txq) &&
	    !(test_bit(GFAR_DOWN, &priv->state)))
		netif_wake_subqueue(priv->ndev, tqi);

	/* Update dirty indicators */
	tx_queue->skb_dirtytx = skb_dirtytx;
	tx_queue->dirty_tx = bdp;

	netdev_tx_completed_queue(txq, howmany, bytes_sent);
}
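
/* Note: the bytes completed above must mirror what gfar_start_xmit()
 * reported via netdev_tx_sent_queue(); the GFAR_CB(skb)->bytes_sent
 * snapshot keeps the two sides of the BQL accounting consistent.
 */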

static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
{
	struct page *page;
	dma_addr_t addr;

	page = dev_alloc_page();
	if (unlikely(!page))
		return false;

	addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(rxq->dev, addr))) {
		__free_page(page);

		return false;
	}

	rxb->dma = addr;
	rxb->page = page;
	rxb->page_offset = 0;

	return true;
}

static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
{
	struct gfar_private *priv = netdev_priv(rx_queue->ndev);
	struct gfar_extra_stats *estats = &priv->extra_stats;

	netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
	atomic64_inc(&estats->rx_alloc_err);
}

static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
				int alloc_cnt)
{
	struct rxbd8 *bdp;
	struct gfar_rx_buff *rxb;
	int i;

	i = rx_queue->next_to_use;
	bdp = &rx_queue->rx_bd_base[i];
	rxb = &rx_queue->rx_buff[i];

	while (alloc_cnt--) {
		/* try reuse page */
		if (unlikely(!rxb->page)) {
			if (unlikely(!gfar_new_page(rx_queue, rxb))) {
				gfar_rx_alloc_err(rx_queue);
				break;
			}
		}

		/* Setup the new RxBD */
		gfar_init_rxbdp(rx_queue, bdp,
				rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);

		/* Update to the next pointer */
		bdp++;
		rxb++;

		if (unlikely(++i == rx_queue->rx_ring_size)) {
			i = 0;
			bdp = rx_queue->rx_bd_base;
			rxb = rx_queue->rx_buff;
		}
	}

	rx_queue->next_to_use = i;
	rx_queue->next_to_alloc = i;
}

static void count_errors(u32 lstatus, struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors matter */
	if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
		stats->rx_length_errors++;

		atomic64_inc(&estats->rx_trunc);

		return;
	}
	/* Count the errors, if there were any */
	if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (lstatus & BD_LFLAG(RXBD_LARGE))
			atomic64_inc(&estats->rx_large);
		else
			atomic64_inc(&estats->rx_short);
	}
	if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
		stats->rx_frame_errors++;
		atomic64_inc(&estats->rx_nonoctet);
	}
	if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
		atomic64_inc(&estats->rx_crcerr);
		stats->rx_crc_errors++;
	}
	if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
		atomic64_inc(&estats->rx_overrun);
		stats->rx_over_errors++;
	}
}

irqreturn_t gfar_receive(int irq, void *grp_id)
{
	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
	unsigned long flags;
	u32 imask, ievent;

	ievent = gfar_read(&grp->regs->ievent);

	if (unlikely(ievent & IEVENT_FGPI)) {
		gfar_write(&grp->regs->ievent, IEVENT_FGPI);
		return IRQ_HANDLED;
	}

	if (likely(napi_schedule_prep(&grp->napi_rx))) {
		spin_lock_irqsave(&grp->grplock, flags);
		imask = gfar_read(&grp->regs->imask);
		imask &= IMASK_RX_DISABLED;
		gfar_write(&grp->regs->imask, imask);
		spin_unlock_irqrestore(&grp->grplock, flags);
		__napi_schedule(&grp->napi_rx);
	} else {
		/* Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
	}

	return IRQ_HANDLED;
}

/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *grp_id)
{
	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
	unsigned long flags;
	u32 imask;

	if (likely(napi_schedule_prep(&grp->napi_tx))) {
		spin_lock_irqsave(&grp->grplock, flags);
		imask = gfar_read(&grp->regs->imask);
		imask &= IMASK_TX_DISABLED;
		gfar_write(&grp->regs->imask, imask);
		spin_unlock_irqrestore(&grp->grplock, flags);
		__napi_schedule(&grp->napi_tx);
	} else {
		/* Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
	}

	return IRQ_HANDLED;
}

static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
			     struct sk_buff *skb, bool first)
{
	int size = lstatus & BD_LENGTH_MASK;
	struct page *page = rxb->page;

	if (likely(first)) {
		skb_put(skb, size);
	} else {
		/* the last fragment's length contains the full frame length */
		if (lstatus & BD_LFLAG(RXBD_LAST))
			size -= skb->len;

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
				rxb->page_offset + RXBUF_ALIGNMENT,
				size, GFAR_RXB_TRUESIZE);
	}

	/* try reuse page */
	if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
		return false;

	/* change offset to the other half */
	rxb->page_offset ^= GFAR_RXB_TRUESIZE;

	page_ref_inc(page);

	return true;
}
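
/* Illustration of the half-page recycling scheme (assuming 4K pages):
 * each page is split into two GFAR_RXB_TRUESIZE halves; once a half is
 * consumed, page_offset is flipped to the other half and the refcount
 * bumped, so the same mapped page keeps feeding the ring unless the
 * stack still holds a reference (page_count != 1) or the page is
 * pfmemalloc'ed.
 */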

static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
			       struct gfar_rx_buff *old_rxb)
{
	struct gfar_rx_buff *new_rxb;
	u16 nta = rxq->next_to_alloc;

	new_rxb = &rxq->rx_buff[nta];

	/* find next buf that can reuse a page */
	nta++;
	rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;

	/* copy page reference */
	*new_rxb = *old_rxb;

	/* sync for use by the device */
	dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
					 old_rxb->page_offset,
					 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
}

static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
					    u32 lstatus, struct sk_buff *skb)
{
	struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
	struct page *page = rxb->page;
	bool first = false;

	if (likely(!skb)) {
		void *buff_addr = page_address(page) + rxb->page_offset;

		skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
		if (unlikely(!skb)) {
			gfar_rx_alloc_err(rx_queue);
			return NULL;
		}
		skb_reserve(skb, RXBUF_ALIGNMENT);
		first = true;
	}

	dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
				      GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);

	if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
		/* reuse the free half of the page */
		gfar_reuse_rx_page(rx_queue, rxb);
	} else {
		/* page cannot be reused, unmap it */
		dma_unmap_page(rx_queue->dev, rxb->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
	}

	/* clear rxb content */
	rxb->page = NULL;

	return skb;
}

static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary. Otherwise, the stack
	 * is left to verify the checksum itself.
	 */
	if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
	    (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);
}

/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct rxfcb *fcb = NULL;

	/* fcb is at the beginning if it exists */
	fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb
	 * Remove the padded bytes, if there are any
	 */
	if (priv->uses_rxfcb)
		skb_pull(skb, GMAC_FCB_LEN);

	/* Get receive timestamp from the skb */
	if (priv->hwts_rx_en) {
		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
		u64 *ns = (u64 *) skb->data;

		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
		shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
	}

	if (priv->padding)
		skb_pull(skb, priv->padding);

	/* Trim off the FCS */
	pskb_trim(skb, skb->len - ETH_FCS_LEN);

	if (ndev->features & NETIF_F_RXCSUM)
		gfar_rx_checksum(skb, fcb);

	/* We need to check for NETIF_F_HW_VLAN_CTAG_RX here.
	 * Even if vlan rx accel is disabled, on some chips
	 * RXFCB_VLN is pseudo randomly set.
	 */
	if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
	    be16_to_cpu(fcb->flags) & RXFCB_VLN)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       be16_to_cpu(fcb->vlctl));
}

/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached. Returns the number
 * of frames handled
 */
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
{
	struct net_device *ndev = rx_queue->ndev;
	struct gfar_private *priv = netdev_priv(ndev);
	struct rxbd8 *bdp;
	int i, howmany = 0;
	struct sk_buff *skb = rx_queue->skb;
	int cleaned_cnt = gfar_rxbd_unused(rx_queue);
	unsigned int total_bytes = 0, total_pkts = 0;

	/* Get the first full descriptor */
	i = rx_queue->next_to_clean;

	while (rx_work_limit--) {
		u32 lstatus;

		if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
			gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
			cleaned_cnt = 0;
		}

		bdp = &rx_queue->rx_bd_base[i];
		lstatus = be32_to_cpu(bdp->lstatus);
		if (lstatus & BD_LFLAG(RXBD_EMPTY))
			break;

		/* order rx buffer descriptor reads */
		rmb();

		/* fetch next to clean buffer from the ring */
		skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
		if (unlikely(!skb))
			break;

		cleaned_cnt++;
		howmany++;

		if (unlikely(++i == rx_queue->rx_ring_size))
			i = 0;

		rx_queue->next_to_clean = i;

		/* fetch next buffer if not the last in frame */
		if (!(lstatus & BD_LFLAG(RXBD_LAST)))
			continue;

		if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
			count_errors(lstatus, ndev);

			/* discard faulty buffer */
			dev_kfree_skb(skb);
			skb = NULL;
			rx_queue->stats.rx_dropped++;
			continue;
		}

		gfar_process_frame(ndev, skb);

		/* Increment the number of packets */
		total_pkts++;
		total_bytes += skb->len;

		skb_record_rx_queue(skb, rx_queue->qindex);

		skb->protocol = eth_type_trans(skb, ndev);

		/* Send the packet up the stack */
		napi_gro_receive(&rx_queue->grp->napi_rx, skb);

		skb = NULL;
	}

	/* Store incomplete frames for completion */
	rx_queue->skb = skb;

	rx_queue->stats.rx_packets += total_pkts;
	rx_queue->stats.rx_bytes += total_bytes;

	if (cleaned_cnt)
		gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);

	/* Update Last Free RxBD pointer for LFC */
	if (unlikely(priv->tx_actual_en)) {
		u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);

		gfar_write(rx_queue->rfbptr, bdp_dma);
	}

	return howmany;
}

static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_rx);
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
	int work_done = 0;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_RX_MASK);

	work_done = gfar_clean_rx_ring(rx_queue, budget);

	if (work_done < budget) {
		u32 imask;
		napi_complete_done(napi, work_done);
		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		spin_lock_irq(&gfargrp->grplock);
		imask = gfar_read(&regs->imask);
		imask |= IMASK_RX_DEFAULT;
		gfar_write(&regs->imask, imask);
		spin_unlock_irq(&gfargrp->grplock);
	}

	return work_done;
}

static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_tx);
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
	u32 imask;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_TX_MASK);

	/* run Tx cleanup to completion */
	if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
		gfar_clean_tx_ring(tx_queue);

	napi_complete(napi);

	spin_lock_irq(&gfargrp->grplock);
	imask = gfar_read(&regs->imask);
	imask |= IMASK_TX_DEFAULT;
	gfar_write(&regs->imask, imask);
	spin_unlock_irq(&gfargrp->grplock);

	return 0;
}

static int gfar_poll_rx(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_rx);
	struct gfar_private *priv = gfargrp->priv;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int work_done = 0, work_done_per_q = 0;
	int i, budget_per_q = 0;
	unsigned long rstat_rxf;
	int num_act_queues;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_RX_MASK);

	rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;

	num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
	if (num_act_queues)
		budget_per_q = budget / num_act_queues;

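	/* e.g. the default NAPI budget of 64 split across 3 active
	 * queues gives each ring a quota of 21 frames for this poll
	 */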
	for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
		/* skip queue if not active */
		if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
			continue;

		rx_queue = priv->rx_queue[i];
		work_done_per_q =
			gfar_clean_rx_ring(rx_queue, budget_per_q);
		work_done += work_done_per_q;

		/* finished processing this queue */
		if (work_done_per_q < budget_per_q) {
			/* clear active queue hw indication */
			gfar_write(&regs->rstat,
				   RSTAT_CLEAR_RXF0 >> i);
			num_act_queues--;

			if (!num_act_queues)
				break;
		}
	}

	if (!num_act_queues) {
		u32 imask;
		napi_complete_done(napi, work_done);

		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		spin_lock_irq(&gfargrp->grplock);
		imask = gfar_read(&regs->imask);
		imask |= IMASK_RX_DEFAULT;
		gfar_write(&regs->imask, imask);
		spin_unlock_irq(&gfargrp->grplock);
	}

	return work_done;
}

static int gfar_poll_tx(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_tx);
	struct gfar_private *priv = gfargrp->priv;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	int has_tx_work = 0;
	int i;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_TX_MASK);

	for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
		tx_queue = priv->tx_queue[i];
		/* run Tx cleanup to completion */
		if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
			gfar_clean_tx_ring(tx_queue);
			has_tx_work = 1;
		}
	}

	if (!has_tx_work) {
		u32 imask;
		napi_complete(napi);

		spin_lock_irq(&gfargrp->grplock);
		imask = gfar_read(&regs->imask);
		imask |= IMASK_TX_DEFAULT;
		gfar_write(&regs->imask, imask);
		spin_unlock_irq(&gfargrp->grplock);
	}

	return 0;
}


3334
Vitaly Woolf2d71c22006-11-07 13:27:02 +03003335#ifdef CONFIG_NET_POLL_CONTROLLER
Jan Ceuleers0977f812012-06-05 03:42:12 +00003336/* Polling 'interrupt' - used by things like netconsole to send skbs
Vitaly Woolf2d71c22006-11-07 13:27:02 +03003337 * without having to re-enable interrupts. It's not called while
3338 * the interrupt routine is executing.
3339 */
3340static void gfar_netpoll(struct net_device *dev)
3341{
3342 struct gfar_private *priv = netdev_priv(dev);
Jan Ceuleers3a2e16c2012-06-05 03:42:14 +00003343 int i;
Vitaly Woolf2d71c22006-11-07 13:27:02 +03003344
3345 /* If the device has multiple interrupts, run tx/rx */
Andy Flemingb31a1d82008-12-16 15:29:15 -08003346 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00003347 for (i = 0; i < priv->num_grps; i++) {
Paul Gortmaker62ed8392013-02-24 05:38:31 +00003348 struct gfar_priv_grp *grp = &priv->gfargrp[i];
3349
3350 disable_irq(gfar_irq(grp, TX)->irq);
3351 disable_irq(gfar_irq(grp, RX)->irq);
3352 disable_irq(gfar_irq(grp, ER)->irq);
3353 gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
3354 enable_irq(gfar_irq(grp, ER)->irq);
3355 enable_irq(gfar_irq(grp, RX)->irq);
3356 enable_irq(gfar_irq(grp, TX)->irq);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00003357 }
Vitaly Woolf2d71c22006-11-07 13:27:02 +03003358 } else {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00003359 for (i = 0; i < priv->num_grps; i++) {
Paul Gortmaker62ed8392013-02-24 05:38:31 +00003360 struct gfar_priv_grp *grp = &priv->gfargrp[i];
3361
3362 disable_irq(gfar_irq(grp, TX)->irq);
3363 gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
3364 enable_irq(gfar_irq(grp, TX)->irq);
Anton Vorontsov43de0042009-12-09 02:52:19 -08003365 }
Vitaly Woolf2d71c22006-11-07 13:27:02 +03003366 }
3367}
3368#endif
3369
Linus Torvalds1da177e2005-04-16 15:20:36 -07003370/* The interrupt handler for devices with one interrupt */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003371static irqreturn_t gfar_interrupt(int irq, void *grp_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003372{
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003373 struct gfar_priv_grp *gfargrp = grp_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003374
3375 /* Save ievent for future reference */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003376 u32 events = gfar_read(&gfargrp->regs->ievent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003377
Linus Torvalds1da177e2005-04-16 15:20:36 -07003378 /* Check for reception */
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04003379 if (events & IEVENT_RX_MASK)
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003380 gfar_receive(irq, grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003381
3382 /* Check for transmit completion */
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04003383 if (events & IEVENT_TX_MASK)
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003384 gfar_transmit(irq, grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003385
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04003386 /* Check for errors */
3387 if (events & IEVENT_ERR_MASK)
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003388 gfar_error(irq, grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003389
3390 return IRQ_HANDLED;
3391}
3392
Linus Torvalds1da177e2005-04-16 15:20:36 -07003393/* Called every time the controller might need to be made
3394 * aware of new link state. The PHY code conveys this
Andy Flemingbb40dcb2005-09-23 22:54:21 -04003395 * information through variables in the phydev structure, and this
Linus Torvalds1da177e2005-04-16 15:20:36 -07003396 * function converts those variables into the appropriate
3397 * register values, and can bring down the device if needed.
3398 */
3399static void adjust_link(struct net_device *dev)
3400{
3401 struct gfar_private *priv = netdev_priv(dev);
Philippe Reynes4c4a6b02016-05-16 01:30:08 +02003402 struct phy_device *phydev = dev->phydev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003403
Claudiu Manoil6ce29b02014-04-30 14:27:21 +03003404 if (unlikely(phydev->link != priv->oldlink ||
Guenter Roeck0ae93b22015-03-02 12:03:27 -08003405 (phydev->link && (phydev->duplex != priv->oldduplex ||
3406 phydev->speed != priv->oldspeed))))
Claudiu Manoil6ce29b02014-04-30 14:27:21 +03003407 gfar_update_link_state(priv);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04003408}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003409
3410/* Update the hash table based on the current list of multicast
3411 * addresses we subscribe to. Also, change the promiscuity of
3412 * the device based on the flags (this function is called
Jan Ceuleers0977f812012-06-05 03:42:12 +00003413 * whenever dev->flags is changed
3414 */
static void gfar_set_multi(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	if (dev->flags & IFF_PROMISC) {
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting them
		 */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if (netdev_mc_empty(dev))
			return;

		/* Parse the list, and set the appropriate bits */
		netdev_for_each_mc_addr(ha, dev) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx, ha->addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, ha->addr);
		}
	}
}
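
/* A minimal sketch of how this path is reached (illustrative; the call
 * sites below are not part of this driver): gfar_set_multi() is wired up
 * as the driver's .ndo_set_rx_mode hook, so the networking core invokes
 * it whenever the address lists or the relevant dev->flags bits change:
 *
 *	static const u8 mc_addr[ETH_ALEN] = {
 *		0x01, 0x00, 0x5e, 0x00, 0x00, 0x01
 *	};
 *
 *	dev_mc_add(dev, mc_addr);	// refilters via ndo_set_rx_mode
 *	dev_set_promiscuity(dev, 1);	// sets IFF_PROMISC, then refilters
 */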

/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception
 */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};

	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
		gfar_set_mac_for_addr(dev, idx, zero_arr);
}

/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 *    do a CRC on it (little endian), and reverse the bits of the
 *    result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 *    table. The table is controlled through 8 32-bit registers:
 *    gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
 *    entry 255. This means that the 3 most significant bits of the
 *    hash index indicate which gaddr register to use, and the other
 *    5 bits indicate which bit (assuming an IBM numbering scheme,
 *    which for PowerPC (tm) is usually the case) in the register
 *    holds the entry.
 */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(ETH_ALEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31 - whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);
}
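
/* Worked example (the CRC value is made up; the arithmetic mirrors
 * gfar_set_hash_for_addr() above): with hash_width == 8 and
 * ether_crc() returning 0xa3000000,
 *
 *	whichbit = (0xa3000000 >> 24) & 0x1f = 3
 *	whichreg = 0xa3000000 >> 29 = 5
 *	value    = 1 << (31 - 3) = 0x10000000
 *
 * i.e. bit 3 (IBM numbering) of hash_regs[5] is set for this address.
 */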

/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	u32 __iomem *macptr = &regs->macstnaddr1;

	macptr += num * 2;

	/* For a station address of 0x12345678ABCD in transmission
	 * order (BE), MACnADDR1 is set to 0xCDAB7856 and
	 * MACnADDR2 is set to 0x34120000.
	 */
	tempval = (addr[5] << 24) | (addr[4] << 16) |
		  (addr[3] << 8) | addr[2];

	gfar_write(macptr, tempval);

	tempval = (addr[1] << 24) | (addr[0] << 16);

	gfar_write(macptr + 1, tempval);
}
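
/* Tracing the byte order with the station address from the comment
 * above, addr[] = { 0x12, 0x34, 0x56, 0x78, 0xAB, 0xCD }:
 *
 *	MACnADDR1 = (0xCD << 24) | (0xAB << 16) | (0x78 << 8) | 0x56
 *		  = 0xCDAB7856
 *	MACnADDR2 = (0x34 << 24) | (0x12 << 16) = 0x34120000
 *
 * The hardware wants the address bytes reversed, with the first two
 * bytes of the address in the upper half of MACnADDR2.
 */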

/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_private *priv = gfargrp->priv;
	struct net_device *dev = priv->ndev;

	/* Save ievent for future reference */
	u32 events = gfar_read(&regs->ievent);

	/* Clear IEVENT */
	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);

	/* Magic Packet is not an error. */
	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    (events & IEVENT_MAG))
		events &= ~IEVENT_MAG;

	/* Log the raw event bits if error debugging is enabled */
	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		netdev_dbg(dev,
			   "error interrupt (ievent=0x%08x imask=0x%08x)\n",
			   events, gfar_read(&regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			netif_dbg(priv, tx_err, dev,
				  "TX FIFO underrun, packet dropped\n");
			dev->stats.tx_dropped++;
			atomic64_inc(&priv->extra_stats.tx_underrun);

			schedule_work(&priv->reset_task);
		}
		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
	}
	if (events & IEVENT_BSY) {
		dev->stats.rx_over_errors++;
		atomic64_inc(&priv->extra_stats.rx_bsy);

		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
			  gfar_read(&regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		atomic64_inc(&priv->extra_stats.rx_babr);

		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
	}
	if (events & IEVENT_EBERR) {
		atomic64_inc(&priv->extra_stats.eberr);
		netif_dbg(priv, rx_err, dev, "bus error\n");
	}
	if (events & IEVENT_RXC)
		netif_dbg(priv, rx_status, dev, "control frame\n");

	if (events & IEVENT_BABT) {
		atomic64_inc(&priv->extra_stats.tx_babt);
		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
	}
	return IRQ_HANDLED;
}
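
/* Illustrative reading of the handler (the bit pairing is an assumption
 * inferred from the nesting above, not a statement about every eTSEC
 * revision): a TX FIFO underrun sets IEVENT_XFUN under the IEVENT_TXE
 * summary bit, so one pass through gfar_error() increments
 * dev->stats.tx_errors and dev->stats.tx_dropped, counts
 * extra_stats.tx_underrun, and schedules priv->reset_task to restart
 * the controller.
 */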

static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
{
	struct net_device *ndev = priv->ndev;
	struct phy_device *phydev = ndev->phydev;
	u32 val = 0;

	if (!phydev->duplex)
		return val;

	if (!priv->pause_aneg_en) {
		if (priv->tx_pause_en)
			val |= MACCFG1_TX_FLOW;
		if (priv->rx_pause_en)
			val |= MACCFG1_RX_FLOW;
	} else {
		u16 lcl_adv, rmt_adv;
		u8 flowctrl;

		/* get link partner capabilities */
		rmt_adv = 0;
		if (phydev->pause)
			rmt_adv = LPA_PAUSE_CAP;
		if (phydev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;

		lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
		if (flowctrl & FLOW_CTRL_TX)
			val |= MACCFG1_TX_FLOW;
		if (flowctrl & FLOW_CTRL_RX)
			val |= MACCFG1_RX_FLOW;
	}

	return val;
}
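
/* Example resolution (one possible outcome; the actual result depends
 * on what both ends advertise): with pause autonegotiation enabled and
 * both link partners advertising symmetric pause, lcl_adv carries
 * ADVERTISE_PAUSE_CAP and rmt_adv = LPA_PAUSE_CAP, so
 * mii_resolve_flowctrl_fdx() returns FLOW_CTRL_TX | FLOW_CTRL_RX and
 * both MACCFG1_TX_FLOW and MACCFG1_RX_FLOW end up set in the returned
 * value.
 */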

static noinline void gfar_update_link_state(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	struct net_device *ndev = priv->ndev;
	struct phy_device *phydev = ndev->phydev;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int i;

	if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
		return;

	if (phydev->link) {
		u32 tempval1 = gfar_read(&regs->maccfg1);
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);
		u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);

		if (phydev->duplex != priv->oldduplex) {
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;

			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			switch (phydev->speed) {
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);

				ecntrl &= ~(ECNTRL_R100);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

				/* Reduced mode distinguishes
				 * between 10 and 100
				 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				netif_warn(priv, link, priv->ndev,
					   "Ack! Speed (%d) is not 10/100/1000!\n",
					   phydev->speed);
				break;
			}

			priv->oldspeed = phydev->speed;
		}

		tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
		tempval1 |= gfar_get_flowctrl_cfg(priv);

		/* Turn last free buffer recording on */
		if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
			for (i = 0; i < priv->num_rx_queues; i++) {
				u32 bdp_dma;

				rx_queue = priv->rx_queue[i];
				bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
				gfar_write(rx_queue->rfbptr, bdp_dma);
			}

			priv->tx_actual_en = 1;
		}

		if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
			priv->tx_actual_en = 0;

		gfar_write(&regs->maccfg1, tempval1);
		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink)
			priv->oldlink = 1;

	} else if (priv->oldlink) {
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (netif_msg_link(priv))
		phy_print_status(phydev);
}
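
/* Concrete scenario (a restatement of the switch above, not new
 * behaviour): if the PHY reports a 100 Mbit/s full-duplex link, the
 * code sets MACCFG2_FULL_DUPLEX, selects MACCFG2_MII as the interface
 * mode, and sets ECNTRL_R100 so reduced mode runs at 100 rather than
 * 10 Mbit/s; at gigabit speed MACCFG2_GMII is selected and ECNTRL_R100
 * is cleared.
 */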

static const struct of_device_id gfar_match[] =
{
	{
		.type = "network",
		.compatible = "gianfar",
	},
	{
		.compatible = "fsl,etsec2",
	},
	{},
};
MODULE_DEVICE_TABLE(of, gfar_match);
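
/* A minimal sketch of a device tree node this table would match. The
 * register offset, interrupt specifiers, MAC address and PHY handle are
 * invented for the example; only the compatible/device_type values come
 * from gfar_match above:
 *
 *	ethernet@24000 {
 *		device_type = "network";
 *		compatible = "gianfar";
 *		reg = <0x24000 0x1000>;
 *		local-mac-address = [ 00 04 9f 00 00 01 ];
 *		interrupts = <29 2 30 2 34 2>;
 *		phy-handle = <&phy0>;
 *	};
 */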

/* Structure for a device driver */
static struct platform_driver gfar_driver = {
	.driver = {
		.name = "fsl-gianfar",
		.pm = GFAR_PM_OPS,
		.of_match_table = gfar_match,
	},
	.probe = gfar_probe,
	.remove = gfar_remove,
};

module_platform_driver(gfar_driver);