blob: a2f972d72e349bfe8d1ad9a7e875cea566f95a11 [file] [log] [blame]
Thomas Falcon032c5e82015-12-21 11:26:06 -06001/**************************************************************************/
2/* */
3/* IBM System i and System p Virtual NIC Device Driver */
4/* Copyright (C) 2014 IBM Corp. */
5/* Santiago Leon (santi_leon@yahoo.com) */
6/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com) */
7/* John Allen (jallen@linux.vnet.ibm.com) */
8/* */
9/* This program is free software; you can redistribute it and/or modify */
10/* it under the terms of the GNU General Public License as published by */
11/* the Free Software Foundation; either version 2 of the License, or */
12/* (at your option) any later version. */
13/* */
14/* This program is distributed in the hope that it will be useful, */
15/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
16/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
17/* GNU General Public License for more details. */
18/* */
19/* You should have received a copy of the GNU General Public License */
20/* along with this program. */
21/* */
22/* This module contains the implementation of a virtual ethernet device */
23/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN */
24/* option of the RS/6000 Platform Architecture to interface with virtual */
25/* ethernet NICs that are presented to the partition by the hypervisor. */
26/* */
27/* Messages are passed between the VNIC driver and the VNIC server using */
28/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
29/* issue and receive commands that initiate communication with the server */
30/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but */
31/* are used by the driver to notify the server that a packet is */
32/* ready for transmission or that a buffer has been added to receive a */
33/* packet. Subsequently, sCRQs are used by the server to notify the */
34/* driver that a packet transmission has been completed or that a packet */
35/* has been received and placed in a waiting buffer. */
36/* */
37/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in */
38/* which skbs are DMA mapped and immediately unmapped when the transmit */
39/* or receive has been completed, the VNIC driver is required to use */
40/* "long term mapping". This entails that large, continuous DMA mapped */
41/* buffers are allocated on driver initialization and these buffers are */
42/* then continuously reused to pass skbs to and from the VNIC server. */
43/* */
44/**************************************************************************/
45
46#include <linux/module.h>
47#include <linux/moduleparam.h>
48#include <linux/types.h>
49#include <linux/errno.h>
50#include <linux/completion.h>
51#include <linux/ioport.h>
52#include <linux/dma-mapping.h>
53#include <linux/kernel.h>
54#include <linux/netdevice.h>
55#include <linux/etherdevice.h>
56#include <linux/skbuff.h>
57#include <linux/init.h>
58#include <linux/delay.h>
59#include <linux/mm.h>
60#include <linux/ethtool.h>
61#include <linux/proc_fs.h>
62#include <linux/in.h>
63#include <linux/ip.h>
Thomas Falconad7775d2016-04-01 17:20:34 -050064#include <linux/ipv6.h>
Thomas Falcon032c5e82015-12-21 11:26:06 -060065#include <linux/irq.h>
66#include <linux/kthread.h>
67#include <linux/seq_file.h>
Thomas Falcon032c5e82015-12-21 11:26:06 -060068#include <linux/interrupt.h>
69#include <net/net_namespace.h>
70#include <asm/hvcall.h>
71#include <linux/atomic.h>
72#include <asm/vio.h>
73#include <asm/iommu.h>
74#include <linux/uaccess.h>
75#include <asm/firmware.h>
Thomas Falcon65dc6892016-07-06 15:35:18 -050076#include <linux/workqueue.h>
Thomas Falcon032c5e82015-12-21 11:26:06 -060077
78#include "ibmvnic.h"
79
80static const char ibmvnic_driver_name[] = "ibmvnic";
81static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
82
83MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>");
84MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
85MODULE_LICENSE("GPL");
86MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
87
88static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
89static int ibmvnic_remove(struct vio_dev *);
90static void release_sub_crqs(struct ibmvnic_adapter *);
91static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
92static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
93static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
94static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
95static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
96 union sub_crq *sub_crq);
Thomas Falconad7775d2016-04-01 17:20:34 -050097static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
Thomas Falcon032c5e82015-12-21 11:26:06 -060098static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
99static int enable_scrq_irq(struct ibmvnic_adapter *,
100 struct ibmvnic_sub_crq_queue *);
101static int disable_scrq_irq(struct ibmvnic_adapter *,
102 struct ibmvnic_sub_crq_queue *);
103static int pending_scrq(struct ibmvnic_adapter *,
104 struct ibmvnic_sub_crq_queue *);
105static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
106 struct ibmvnic_sub_crq_queue *);
107static int ibmvnic_poll(struct napi_struct *napi, int data);
108static void send_map_query(struct ibmvnic_adapter *adapter);
109static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
110static void send_request_unmap(struct ibmvnic_adapter *, u8);
John Allenbd0b6722017-03-17 17:13:40 -0500111static void send_login(struct ibmvnic_adapter *adapter);
112static void send_cap_queries(struct ibmvnic_adapter *adapter);
113static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
John Allenea5509f2017-03-17 17:13:43 -0500114static int ibmvnic_init(struct ibmvnic_adapter *);
Nathan Fontenotf9928872017-03-30 02:48:54 -0400115static void release_crq_queue(struct ibmvnic_adapter *);
Thomas Falcon032c5e82015-12-21 11:26:06 -0600116
117struct ibmvnic_stat {
118 char name[ETH_GSTRING_LEN];
119 int offset;
120};
121
122#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
123 offsetof(struct ibmvnic_statistics, stat))
124#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
125
126static const struct ibmvnic_stat ibmvnic_stats[] = {
127 {"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
128 {"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
129 {"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
130 {"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
131 {"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
132 {"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
133 {"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
134 {"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
135 {"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
136 {"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
137 {"align_errors", IBMVNIC_STAT_OFF(align_errors)},
138 {"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
139 {"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
140 {"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
141 {"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
142 {"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
143 {"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
144 {"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
145 {"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
146 {"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
147 {"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
148 {"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
149};
150
151static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
152 unsigned long length, unsigned long *number,
153 unsigned long *irq)
154{
155 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
156 long rc;
157
158 rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
159 *number = retbuf[0];
160 *irq = retbuf[1];
161
162 return rc;
163}
164
Thomas Falcon032c5e82015-12-21 11:26:06 -0600165static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
166 struct ibmvnic_long_term_buff *ltb, int size)
167{
168 struct device *dev = &adapter->vdev->dev;
169
170 ltb->size = size;
171 ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
172 GFP_KERNEL);
173
174 if (!ltb->buff) {
175 dev_err(dev, "Couldn't alloc long term buffer\n");
176 return -ENOMEM;
177 }
178 ltb->map_id = adapter->map_id;
179 adapter->map_id++;
Nathan Fontenotdb5d0b52017-02-10 13:45:05 -0500180
181 init_completion(&adapter->fw_done);
Thomas Falcon032c5e82015-12-21 11:26:06 -0600182 send_request_map(adapter, ltb->addr,
183 ltb->size, ltb->map_id);
Thomas Falcon032c5e82015-12-21 11:26:06 -0600184 wait_for_completion(&adapter->fw_done);
185 return 0;
186}
187
188static void free_long_term_buff(struct ibmvnic_adapter *adapter,
189 struct ibmvnic_long_term_buff *ltb)
190{
191 struct device *dev = &adapter->vdev->dev;
192
Nathan Fontenotc657e322017-03-30 02:49:06 -0400193 if (!ltb->buff)
194 return;
195
Thomas Falcon032c5e82015-12-21 11:26:06 -0600196 dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
Thomas Falcondfad09a2016-08-18 11:37:51 -0500197 if (!adapter->failover)
198 send_request_unmap(adapter, ltb->map_id);
Thomas Falcon032c5e82015-12-21 11:26:06 -0600199}
200
Thomas Falcon032c5e82015-12-21 11:26:06 -0600201static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
202 struct ibmvnic_rx_pool *pool)
203{
204 int count = pool->size - atomic_read(&pool->available);
205 struct device *dev = &adapter->vdev->dev;
206 int buffers_added = 0;
207 unsigned long lpar_rc;
208 union sub_crq sub_crq;
209 struct sk_buff *skb;
210 unsigned int offset;
211 dma_addr_t dma_addr;
212 unsigned char *dst;
213 u64 *handle_array;
214 int shift = 0;
215 int index;
216 int i;
217
218 handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
219 be32_to_cpu(adapter->login_rsp_buf->
220 off_rxadd_subcrqs));
221
222 for (i = 0; i < count; ++i) {
223 skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
224 if (!skb) {
225 dev_err(dev, "Couldn't replenish rx buff\n");
226 adapter->replenish_no_mem++;
227 break;
228 }
229
230 index = pool->free_map[pool->next_free];
231
232 if (pool->rx_buff[index].skb)
233 dev_err(dev, "Inconsistent free_map!\n");
234
235 /* Copy the skb to the long term mapped DMA buffer */
236 offset = index * pool->buff_size;
237 dst = pool->long_term_buff.buff + offset;
238 memset(dst, 0, pool->buff_size);
239 dma_addr = pool->long_term_buff.addr + offset;
240 pool->rx_buff[index].data = dst;
241
242 pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
243 pool->rx_buff[index].dma = dma_addr;
244 pool->rx_buff[index].skb = skb;
245 pool->rx_buff[index].pool_index = pool->index;
246 pool->rx_buff[index].size = pool->buff_size;
247
248 memset(&sub_crq, 0, sizeof(sub_crq));
249 sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
250 sub_crq.rx_add.correlator =
251 cpu_to_be64((u64)&pool->rx_buff[index]);
252 sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
253 sub_crq.rx_add.map_id = pool->long_term_buff.map_id;
254
255 /* The length field of the sCRQ is defined to be 24 bits so the
256 * buffer size needs to be left shifted by a byte before it is
257 * converted to big endian to prevent the last byte from being
258 * truncated.
259 */
260#ifdef __LITTLE_ENDIAN__
261 shift = 8;
262#endif
263 sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
264
265 lpar_rc = send_subcrq(adapter, handle_array[pool->index],
266 &sub_crq);
267 if (lpar_rc != H_SUCCESS)
268 goto failure;
269
270 buffers_added++;
271 adapter->replenish_add_buff_success++;
272 pool->next_free = (pool->next_free + 1) % pool->size;
273 }
274 atomic_add(buffers_added, &pool->available);
275 return;
276
277failure:
278 dev_info(dev, "replenish pools failure\n");
279 pool->free_map[pool->next_free] = index;
280 pool->rx_buff[index].skb = NULL;
281 if (!dma_mapping_error(dev, dma_addr))
282 dma_unmap_single(dev, dma_addr, pool->buff_size,
283 DMA_FROM_DEVICE);
284
285 dev_kfree_skb_any(skb);
286 adapter->replenish_add_buff_failure++;
287 atomic_add(buffers_added, &pool->available);
288}
289
290static void replenish_pools(struct ibmvnic_adapter *adapter)
291{
292 int i;
293
294 if (adapter->migrated)
295 return;
296
297 adapter->replenish_task_cycles++;
298 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
299 i++) {
300 if (adapter->rx_pool[i].active)
301 replenish_rx_pool(adapter, &adapter->rx_pool[i]);
302 }
303}
304
Nathan Fontenot7bbc27a2017-03-30 02:49:23 -0400305static void release_stats_token(struct ibmvnic_adapter *adapter)
306{
307 struct device *dev = &adapter->vdev->dev;
308
309 if (!adapter->stats_token)
310 return;
311
312 dma_unmap_single(dev, adapter->stats_token,
313 sizeof(struct ibmvnic_statistics),
314 DMA_FROM_DEVICE);
315 adapter->stats_token = 0;
316}
317
318static int init_stats_token(struct ibmvnic_adapter *adapter)
319{
320 struct device *dev = &adapter->vdev->dev;
321 dma_addr_t stok;
322
323 stok = dma_map_single(dev, &adapter->stats,
324 sizeof(struct ibmvnic_statistics),
325 DMA_FROM_DEVICE);
326 if (dma_mapping_error(dev, stok)) {
327 dev_err(dev, "Couldn't map stats buffer\n");
328 return -1;
329 }
330
331 adapter->stats_token = stok;
332 return 0;
333}
334
Nathan Fontenot0ffe2cb2017-03-30 02:49:12 -0400335static void release_rx_pools(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -0600336{
Nathan Fontenot0ffe2cb2017-03-30 02:49:12 -0400337 struct ibmvnic_rx_pool *rx_pool;
338 int rx_scrqs;
339 int i, j;
Thomas Falcon032c5e82015-12-21 11:26:06 -0600340
Nathan Fontenot0ffe2cb2017-03-30 02:49:12 -0400341 if (!adapter->rx_pool)
Thomas Falcon032c5e82015-12-21 11:26:06 -0600342 return;
343
Nathan Fontenot0ffe2cb2017-03-30 02:49:12 -0400344 rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
345 for (i = 0; i < rx_scrqs; i++) {
346 rx_pool = &adapter->rx_pool[i];
347
348 kfree(rx_pool->free_map);
349 free_long_term_buff(adapter, &rx_pool->long_term_buff);
350
351 if (!rx_pool->rx_buff)
352 continue;
353
354 for (j = 0; j < rx_pool->size; j++) {
355 if (rx_pool->rx_buff[j].skb) {
356 dev_kfree_skb_any(rx_pool->rx_buff[i].skb);
357 rx_pool->rx_buff[i].skb = NULL;
358 }
Thomas Falcon032c5e82015-12-21 11:26:06 -0600359 }
Nathan Fontenot0ffe2cb2017-03-30 02:49:12 -0400360
361 kfree(rx_pool->rx_buff);
Thomas Falcon032c5e82015-12-21 11:26:06 -0600362 }
Nathan Fontenot0ffe2cb2017-03-30 02:49:12 -0400363
364 kfree(adapter->rx_pool);
365 adapter->rx_pool = NULL;
366}
367
368static int init_rx_pools(struct net_device *netdev)
369{
370 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
371 struct device *dev = &adapter->vdev->dev;
372 struct ibmvnic_rx_pool *rx_pool;
373 int rxadd_subcrqs;
374 u64 *size_array;
375 int i, j;
376
377 rxadd_subcrqs =
378 be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
379 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
380 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
381
382 adapter->rx_pool = kcalloc(rxadd_subcrqs,
383 sizeof(struct ibmvnic_rx_pool),
384 GFP_KERNEL);
385 if (!adapter->rx_pool) {
386 dev_err(dev, "Failed to allocate rx pools\n");
387 return -1;
388 }
389
390 for (i = 0; i < rxadd_subcrqs; i++) {
391 rx_pool = &adapter->rx_pool[i];
392
393 netdev_dbg(adapter->netdev,
394 "Initializing rx_pool %d, %lld buffs, %lld bytes each\n",
395 i, adapter->req_rx_add_entries_per_subcrq,
396 be64_to_cpu(size_array[i]));
397
398 rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
399 rx_pool->index = i;
400 rx_pool->buff_size = be64_to_cpu(size_array[i]);
401 rx_pool->active = 1;
402
403 rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
404 GFP_KERNEL);
405 if (!rx_pool->free_map) {
406 release_rx_pools(adapter);
407 return -1;
408 }
409
410 rx_pool->rx_buff = kcalloc(rx_pool->size,
411 sizeof(struct ibmvnic_rx_buff),
412 GFP_KERNEL);
413 if (!rx_pool->rx_buff) {
414 dev_err(dev, "Couldn't alloc rx buffers\n");
415 release_rx_pools(adapter);
416 return -1;
417 }
418
419 if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
420 rx_pool->size * rx_pool->buff_size)) {
421 release_rx_pools(adapter);
422 return -1;
423 }
424
425 for (j = 0; j < rx_pool->size; ++j)
426 rx_pool->free_map[j] = j;
427
428 atomic_set(&rx_pool->available, 0);
429 rx_pool->next_alloc = 0;
430 rx_pool->next_free = 0;
431 }
432
433 return 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -0600434}
435
Nathan Fontenotc657e322017-03-30 02:49:06 -0400436static void release_tx_pools(struct ibmvnic_adapter *adapter)
437{
438 struct ibmvnic_tx_pool *tx_pool;
439 int i, tx_scrqs;
440
441 if (!adapter->tx_pool)
442 return;
443
444 tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
445 for (i = 0; i < tx_scrqs; i++) {
446 tx_pool = &adapter->tx_pool[i];
447 kfree(tx_pool->tx_buff);
448 free_long_term_buff(adapter, &tx_pool->long_term_buff);
449 kfree(tx_pool->free_map);
450 }
451
452 kfree(adapter->tx_pool);
453 adapter->tx_pool = NULL;
454}
455
456static int init_tx_pools(struct net_device *netdev)
457{
458 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
459 struct device *dev = &adapter->vdev->dev;
460 struct ibmvnic_tx_pool *tx_pool;
461 int tx_subcrqs;
462 int i, j;
463
464 tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
465 adapter->tx_pool = kcalloc(tx_subcrqs,
466 sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
467 if (!adapter->tx_pool)
468 return -1;
469
470 for (i = 0; i < tx_subcrqs; i++) {
471 tx_pool = &adapter->tx_pool[i];
472 tx_pool->tx_buff = kcalloc(adapter->req_tx_entries_per_subcrq,
473 sizeof(struct ibmvnic_tx_buff),
474 GFP_KERNEL);
475 if (!tx_pool->tx_buff) {
476 dev_err(dev, "tx pool buffer allocation failed\n");
477 release_tx_pools(adapter);
478 return -1;
479 }
480
481 if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
482 adapter->req_tx_entries_per_subcrq *
483 adapter->req_mtu)) {
484 release_tx_pools(adapter);
485 return -1;
486 }
487
488 tx_pool->free_map = kcalloc(adapter->req_tx_entries_per_subcrq,
489 sizeof(int), GFP_KERNEL);
490 if (!tx_pool->free_map) {
491 release_tx_pools(adapter);
492 return -1;
493 }
494
495 for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
496 tx_pool->free_map[j] = j;
497
498 tx_pool->consumer_index = 0;
499 tx_pool->producer_index = 0;
500 }
501
502 return 0;
503}
504
Nathan Fontenotf0b8c962017-03-30 02:49:00 -0400505static void release_bounce_buffer(struct ibmvnic_adapter *adapter)
506{
507 struct device *dev = &adapter->vdev->dev;
508
509 if (!adapter->bounce_buffer)
510 return;
511
512 if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
513 dma_unmap_single(dev, adapter->bounce_buffer_dma,
514 adapter->bounce_buffer_size,
515 DMA_BIDIRECTIONAL);
516 adapter->bounce_buffer_dma = DMA_ERROR_CODE;
517 }
518
519 kfree(adapter->bounce_buffer);
520 adapter->bounce_buffer = NULL;
521}
522
523static int init_bounce_buffer(struct net_device *netdev)
524{
525 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
526 struct device *dev = &adapter->vdev->dev;
527 char *buf;
528 int buf_sz;
529 dma_addr_t map_addr;
530
531 buf_sz = (netdev->mtu + ETH_HLEN - 1) / PAGE_SIZE + 1;
532 buf = kmalloc(adapter->bounce_buffer_size, GFP_KERNEL);
533 if (!buf)
534 return -1;
535
536 map_addr = dma_map_single(dev, buf, buf_sz, DMA_TO_DEVICE);
537 if (dma_mapping_error(dev, map_addr)) {
538 dev_err(dev, "Couldn't map bounce buffer\n");
539 kfree(buf);
540 return -1;
541 }
542
543 adapter->bounce_buffer = buf;
544 adapter->bounce_buffer_size = buf_sz;
545 adapter->bounce_buffer_dma = map_addr;
546 return 0;
547}
548
John Allena57a5d22017-03-17 17:13:41 -0500549static int ibmvnic_login(struct net_device *netdev)
Thomas Falcon032c5e82015-12-21 11:26:06 -0600550{
551 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
John Allenbd0b6722017-03-17 17:13:40 -0500552 unsigned long timeout = msecs_to_jiffies(30000);
Thomas Falcon032c5e82015-12-21 11:26:06 -0600553 struct device *dev = &adapter->vdev->dev;
Thomas Falcon032c5e82015-12-21 11:26:06 -0600554
John Allenbd0b6722017-03-17 17:13:40 -0500555 do {
556 if (adapter->renegotiate) {
557 adapter->renegotiate = false;
Nathan Fontenotb5108882017-03-30 02:49:18 -0400558 release_sub_crqs(adapter);
John Allenbd0b6722017-03-17 17:13:40 -0500559
560 reinit_completion(&adapter->init_done);
561 send_cap_queries(adapter);
562 if (!wait_for_completion_timeout(&adapter->init_done,
563 timeout)) {
564 dev_err(dev, "Capabilities query timeout\n");
565 return -1;
566 }
567 }
568
569 reinit_completion(&adapter->init_done);
570 send_login(adapter);
571 if (!wait_for_completion_timeout(&adapter->init_done,
572 timeout)) {
573 dev_err(dev, "Login timeout\n");
574 return -1;
575 }
576 } while (adapter->renegotiate);
577
John Allena57a5d22017-03-17 17:13:41 -0500578 return 0;
579}
580
581static int ibmvnic_open(struct net_device *netdev)
582{
583 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
584 struct device *dev = &adapter->vdev->dev;
John Allena57a5d22017-03-17 17:13:41 -0500585 union ibmvnic_crq crq;
586 int rxadd_subcrqs;
John Allena57a5d22017-03-17 17:13:41 -0500587 int tx_subcrqs;
588 int rc = 0;
Nathan Fontenot0ffe2cb2017-03-30 02:49:12 -0400589 int i;
John Allena57a5d22017-03-17 17:13:41 -0500590
John Allenea5509f2017-03-17 17:13:43 -0500591 if (adapter->is_closed) {
592 rc = ibmvnic_init(adapter);
593 if (rc)
594 return rc;
595 }
596
John Allena57a5d22017-03-17 17:13:41 -0500597 rc = ibmvnic_login(netdev);
598 if (rc)
599 return rc;
600
John Allenbd0b6722017-03-17 17:13:40 -0500601 rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
602 if (rc) {
603 dev_err(dev, "failed to set the number of tx queues\n");
604 return -1;
605 }
606
607 rc = init_sub_crq_irqs(adapter);
608 if (rc) {
609 dev_err(dev, "failed to initialize sub crq irqs\n");
610 return -1;
611 }
612
Thomas Falcon032c5e82015-12-21 11:26:06 -0600613 rxadd_subcrqs =
614 be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
615 tx_subcrqs =
616 be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
Nathan Fontenot0ffe2cb2017-03-30 02:49:12 -0400617
Thomas Falcon032c5e82015-12-21 11:26:06 -0600618 adapter->map_id = 1;
619 adapter->napi = kcalloc(adapter->req_rx_queues,
620 sizeof(struct napi_struct), GFP_KERNEL);
621 if (!adapter->napi)
622 goto alloc_napi_failed;
623 for (i = 0; i < adapter->req_rx_queues; i++) {
624 netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
625 NAPI_POLL_WEIGHT);
626 napi_enable(&adapter->napi[i]);
627 }
Thomas Falcon032c5e82015-12-21 11:26:06 -0600628
Thomas Falcon032c5e82015-12-21 11:26:06 -0600629 send_map_query(adapter);
Nathan Fontenot0ffe2cb2017-03-30 02:49:12 -0400630
631 rc = init_rx_pools(netdev);
632 if (rc)
633 goto rx_pool_failed;
Thomas Falcon032c5e82015-12-21 11:26:06 -0600634
Nathan Fontenotc657e322017-03-30 02:49:06 -0400635 rc = init_tx_pools(netdev);
636 if (rc)
637 goto tx_pool_failed;
Thomas Falcon032c5e82015-12-21 11:26:06 -0600638
Nathan Fontenotf0b8c962017-03-30 02:49:00 -0400639 rc = init_bounce_buffer(netdev);
640 if (rc)
641 goto bounce_init_failed;
642
Thomas Falcon032c5e82015-12-21 11:26:06 -0600643 replenish_pools(adapter);
644
645 /* We're ready to receive frames, enable the sub-crq interrupts and
646 * set the logical link state to up
647 */
648 for (i = 0; i < adapter->req_rx_queues; i++)
649 enable_scrq_irq(adapter, adapter->rx_scrq[i]);
650
651 for (i = 0; i < adapter->req_tx_queues; i++)
652 enable_scrq_irq(adapter, adapter->tx_scrq[i]);
653
654 memset(&crq, 0, sizeof(crq));
655 crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
656 crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
657 crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
658 ibmvnic_send_crq(adapter, &crq);
659
Thomas Falconb8efb892016-07-06 15:35:15 -0500660 netif_tx_start_all_queues(netdev);
John Allenea5509f2017-03-17 17:13:43 -0500661 adapter->is_closed = false;
Thomas Falconb8efb892016-07-06 15:35:15 -0500662
Thomas Falcon032c5e82015-12-21 11:26:06 -0600663 return 0;
664
Nathan Fontenotf0b8c962017-03-30 02:49:00 -0400665bounce_init_failed:
Thomas Falcon032c5e82015-12-21 11:26:06 -0600666 i = tx_subcrqs - 1;
667 kfree(adapter->tx_pool[i].free_map);
Nathan Fontenotc657e322017-03-30 02:49:06 -0400668tx_pool_failed:
Thomas Falcon032c5e82015-12-21 11:26:06 -0600669 i = rxadd_subcrqs;
Nathan Fontenot0ffe2cb2017-03-30 02:49:12 -0400670rx_pool_failed:
Thomas Falcon032c5e82015-12-21 11:26:06 -0600671 for (i = 0; i < adapter->req_rx_queues; i++)
Nathan Fontenote722af62017-02-10 13:29:06 -0500672 napi_disable(&adapter->napi[i]);
Thomas Falcon032c5e82015-12-21 11:26:06 -0600673alloc_napi_failed:
John Allenbd0b6722017-03-17 17:13:40 -0500674 release_sub_crqs(adapter);
Thomas Falcon032c5e82015-12-21 11:26:06 -0600675 return -ENOMEM;
676}
677
John Allenea5509f2017-03-17 17:13:43 -0500678static void ibmvnic_release_resources(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -0600679{
Nathan Fontenotf0b8c962017-03-30 02:49:00 -0400680 release_bounce_buffer(adapter);
Nathan Fontenotc657e322017-03-30 02:49:06 -0400681 release_tx_pools(adapter);
Nathan Fontenot0ffe2cb2017-03-30 02:49:12 -0400682 release_rx_pools(adapter);
John Allenea5509f2017-03-17 17:13:43 -0500683
684 release_sub_crqs(adapter);
Nathan Fontenotf9928872017-03-30 02:48:54 -0400685 release_crq_queue(adapter);
John Allenea5509f2017-03-17 17:13:43 -0500686
Nathan Fontenot7bbc27a2017-03-30 02:49:23 -0400687 release_stats_token(adapter);
John Allenea5509f2017-03-17 17:13:43 -0500688}
689
690static int ibmvnic_close(struct net_device *netdev)
691{
692 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
693 union ibmvnic_crq crq;
694 int i;
695
696 adapter->closing = true;
697
698 for (i = 0; i < adapter->req_rx_queues; i++)
699 napi_disable(&adapter->napi[i]);
700
701 if (!adapter->failover)
702 netif_tx_stop_all_queues(netdev);
703
Thomas Falcon032c5e82015-12-21 11:26:06 -0600704 memset(&crq, 0, sizeof(crq));
705 crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
706 crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
707 crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_DN;
708 ibmvnic_send_crq(adapter, &crq);
709
John Allenea5509f2017-03-17 17:13:43 -0500710 ibmvnic_release_resources(adapter);
Thomas Falcon032c5e82015-12-21 11:26:06 -0600711
John Allenea5509f2017-03-17 17:13:43 -0500712 adapter->is_closed = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -0600713 adapter->closing = false;
Thomas Falcon032c5e82015-12-21 11:26:06 -0600714 return 0;
715}
716
Thomas Falconad7775d2016-04-01 17:20:34 -0500717/**
718 * build_hdr_data - creates L2/L3/L4 header data buffer
719 * @hdr_field - bitfield determining needed headers
720 * @skb - socket buffer
721 * @hdr_len - array of header lengths
722 * @tot_len - total length of data
723 *
724 * Reads hdr_field to determine which headers are needed by firmware.
725 * Builds a buffer containing these headers. Saves individual header
726 * lengths and total buffer length to be used to build descriptors.
727 */
728static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
729 int *hdr_len, u8 *hdr_data)
730{
731 int len = 0;
732 u8 *hdr;
733
734 hdr_len[0] = sizeof(struct ethhdr);
735
736 if (skb->protocol == htons(ETH_P_IP)) {
737 hdr_len[1] = ip_hdr(skb)->ihl * 4;
738 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
739 hdr_len[2] = tcp_hdrlen(skb);
740 else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
741 hdr_len[2] = sizeof(struct udphdr);
742 } else if (skb->protocol == htons(ETH_P_IPV6)) {
743 hdr_len[1] = sizeof(struct ipv6hdr);
744 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
745 hdr_len[2] = tcp_hdrlen(skb);
746 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
747 hdr_len[2] = sizeof(struct udphdr);
748 }
749
750 memset(hdr_data, 0, 120);
751 if ((hdr_field >> 6) & 1) {
752 hdr = skb_mac_header(skb);
753 memcpy(hdr_data, hdr, hdr_len[0]);
754 len += hdr_len[0];
755 }
756
757 if ((hdr_field >> 5) & 1) {
758 hdr = skb_network_header(skb);
759 memcpy(hdr_data + len, hdr, hdr_len[1]);
760 len += hdr_len[1];
761 }
762
763 if ((hdr_field >> 4) & 1) {
764 hdr = skb_transport_header(skb);
765 memcpy(hdr_data + len, hdr, hdr_len[2]);
766 len += hdr_len[2];
767 }
768 return len;
769}
770
771/**
772 * create_hdr_descs - create header and header extension descriptors
773 * @hdr_field - bitfield determining needed headers
774 * @data - buffer containing header data
775 * @len - length of data buffer
776 * @hdr_len - array of individual header lengths
777 * @scrq_arr - descriptor array
778 *
779 * Creates header and, if needed, header extension descriptors and
780 * places them in a descriptor array, scrq_arr
781 */
782
783static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
784 union sub_crq *scrq_arr)
785{
786 union sub_crq hdr_desc;
787 int tmp_len = len;
788 u8 *data, *cur;
789 int tmp;
790
791 while (tmp_len > 0) {
792 cur = hdr_data + len - tmp_len;
793
794 memset(&hdr_desc, 0, sizeof(hdr_desc));
795 if (cur != hdr_data) {
796 data = hdr_desc.hdr_ext.data;
797 tmp = tmp_len > 29 ? 29 : tmp_len;
798 hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
799 hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
800 hdr_desc.hdr_ext.len = tmp;
801 } else {
802 data = hdr_desc.hdr.data;
803 tmp = tmp_len > 24 ? 24 : tmp_len;
804 hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
805 hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
806 hdr_desc.hdr.len = tmp;
807 hdr_desc.hdr.l2_len = (u8)hdr_len[0];
808 hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
809 hdr_desc.hdr.l4_len = (u8)hdr_len[2];
810 hdr_desc.hdr.flag = hdr_field << 1;
811 }
812 memcpy(data, cur, tmp);
813 tmp_len -= tmp;
814 *scrq_arr = hdr_desc;
815 scrq_arr++;
816 }
817}
818
819/**
820 * build_hdr_descs_arr - build a header descriptor array
821 * @skb - socket buffer
822 * @num_entries - number of descriptors to be sent
823 * @subcrq - first TX descriptor
824 * @hdr_field - bit field determining which headers will be sent
825 *
826 * This function will build a TX descriptor array with applicable
827 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
828 */
829
830static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
831 int *num_entries, u8 hdr_field)
832{
833 int hdr_len[3] = {0, 0, 0};
834 int tot_len, len;
835 u8 *hdr_data = txbuff->hdr_data;
836
837 tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
838 txbuff->hdr_data);
839 len = tot_len;
840 len -= 24;
841 if (len > 0)
842 num_entries += len % 29 ? len / 29 + 1 : len / 29;
843 create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
844 txbuff->indir_arr + 1);
845}
846
Thomas Falcon032c5e82015-12-21 11:26:06 -0600847static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
848{
849 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
850 int queue_num = skb_get_queue_mapping(skb);
Thomas Falconad7775d2016-04-01 17:20:34 -0500851 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
Thomas Falcon032c5e82015-12-21 11:26:06 -0600852 struct device *dev = &adapter->vdev->dev;
853 struct ibmvnic_tx_buff *tx_buff = NULL;
Thomas Falcon142c0ac2017-03-05 12:18:41 -0600854 struct ibmvnic_sub_crq_queue *tx_scrq;
Thomas Falcon032c5e82015-12-21 11:26:06 -0600855 struct ibmvnic_tx_pool *tx_pool;
856 unsigned int tx_send_failed = 0;
857 unsigned int tx_map_failed = 0;
858 unsigned int tx_dropped = 0;
859 unsigned int tx_packets = 0;
860 unsigned int tx_bytes = 0;
861 dma_addr_t data_dma_addr;
862 struct netdev_queue *txq;
863 bool used_bounce = false;
864 unsigned long lpar_rc;
865 union sub_crq tx_crq;
866 unsigned int offset;
Thomas Falconad7775d2016-04-01 17:20:34 -0500867 int num_entries = 1;
Thomas Falcon032c5e82015-12-21 11:26:06 -0600868 unsigned char *dst;
869 u64 *handle_array;
870 int index = 0;
871 int ret = 0;
872
873 tx_pool = &adapter->tx_pool[queue_num];
Thomas Falcon142c0ac2017-03-05 12:18:41 -0600874 tx_scrq = adapter->tx_scrq[queue_num];
Thomas Falcon032c5e82015-12-21 11:26:06 -0600875 txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
876 handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
877 be32_to_cpu(adapter->login_rsp_buf->
878 off_txsubm_subcrqs));
879 if (adapter->migrated) {
880 tx_send_failed++;
881 tx_dropped++;
882 ret = NETDEV_TX_BUSY;
883 goto out;
884 }
885
886 index = tx_pool->free_map[tx_pool->consumer_index];
887 offset = index * adapter->req_mtu;
888 dst = tx_pool->long_term_buff.buff + offset;
889 memset(dst, 0, adapter->req_mtu);
890 skb_copy_from_linear_data(skb, dst, skb->len);
891 data_dma_addr = tx_pool->long_term_buff.addr + offset;
892
893 tx_pool->consumer_index =
894 (tx_pool->consumer_index + 1) %
Thomas Falcon068d9f92017-03-05 12:18:42 -0600895 adapter->req_tx_entries_per_subcrq;
Thomas Falcon032c5e82015-12-21 11:26:06 -0600896
897 tx_buff = &tx_pool->tx_buff[index];
898 tx_buff->skb = skb;
899 tx_buff->data_dma[0] = data_dma_addr;
900 tx_buff->data_len[0] = skb->len;
901 tx_buff->index = index;
902 tx_buff->pool_index = queue_num;
903 tx_buff->last_frag = true;
904 tx_buff->used_bounce = used_bounce;
905
906 memset(&tx_crq, 0, sizeof(tx_crq));
907 tx_crq.v1.first = IBMVNIC_CRQ_CMD;
908 tx_crq.v1.type = IBMVNIC_TX_DESC;
909 tx_crq.v1.n_crq_elem = 1;
910 tx_crq.v1.n_sge = 1;
911 tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
912 tx_crq.v1.correlator = cpu_to_be32(index);
913 tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
914 tx_crq.v1.sge_len = cpu_to_be32(skb->len);
915 tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
916
917 if (adapter->vlan_header_insertion) {
918 tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
919 tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
920 }
921
922 if (skb->protocol == htons(ETH_P_IP)) {
923 if (ip_hdr(skb)->version == 4)
924 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
925 else if (ip_hdr(skb)->version == 6)
926 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
927
928 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
929 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
930 else if (ip_hdr(skb)->protocol != IPPROTO_TCP)
931 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
932 }
933
Thomas Falconad7775d2016-04-01 17:20:34 -0500934 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Thomas Falcon032c5e82015-12-21 11:26:06 -0600935 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
Thomas Falconad7775d2016-04-01 17:20:34 -0500936 hdrs += 2;
937 }
938 /* determine if l2/3/4 headers are sent to firmware */
939 if ((*hdrs >> 7) & 1 &&
940 (skb->protocol == htons(ETH_P_IP) ||
941 skb->protocol == htons(ETH_P_IPV6))) {
942 build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
943 tx_crq.v1.n_crq_elem = num_entries;
944 tx_buff->indir_arr[0] = tx_crq;
945 tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
946 sizeof(tx_buff->indir_arr),
947 DMA_TO_DEVICE);
948 if (dma_mapping_error(dev, tx_buff->indir_dma)) {
949 if (!firmware_has_feature(FW_FEATURE_CMO))
950 dev_err(dev, "tx: unable to map descriptor array\n");
951 tx_map_failed++;
952 tx_dropped++;
953 ret = NETDEV_TX_BUSY;
954 goto out;
955 }
John Allen498cd8e2016-04-06 11:49:55 -0500956 lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
Thomas Falconad7775d2016-04-01 17:20:34 -0500957 (u64)tx_buff->indir_dma,
958 (u64)num_entries);
959 } else {
John Allen498cd8e2016-04-06 11:49:55 -0500960 lpar_rc = send_subcrq(adapter, handle_array[queue_num],
961 &tx_crq);
Thomas Falconad7775d2016-04-01 17:20:34 -0500962 }
Thomas Falcon032c5e82015-12-21 11:26:06 -0600963 if (lpar_rc != H_SUCCESS) {
964 dev_err(dev, "tx failed with code %ld\n", lpar_rc);
965
966 if (tx_pool->consumer_index == 0)
967 tx_pool->consumer_index =
Thomas Falcon068d9f92017-03-05 12:18:42 -0600968 adapter->req_tx_entries_per_subcrq - 1;
Thomas Falcon032c5e82015-12-21 11:26:06 -0600969 else
970 tx_pool->consumer_index--;
971
972 tx_send_failed++;
973 tx_dropped++;
974 ret = NETDEV_TX_BUSY;
975 goto out;
976 }
Thomas Falcon142c0ac2017-03-05 12:18:41 -0600977
978 atomic_inc(&tx_scrq->used);
979
980 if (atomic_read(&tx_scrq->used) >= adapter->req_tx_entries_per_subcrq) {
981 netdev_info(netdev, "Stopping queue %d\n", queue_num);
982 netif_stop_subqueue(netdev, queue_num);
983 }
984
Thomas Falcon032c5e82015-12-21 11:26:06 -0600985 tx_packets++;
986 tx_bytes += skb->len;
987 txq->trans_start = jiffies;
988 ret = NETDEV_TX_OK;
989
990out:
991 netdev->stats.tx_dropped += tx_dropped;
992 netdev->stats.tx_bytes += tx_bytes;
993 netdev->stats.tx_packets += tx_packets;
994 adapter->tx_send_failed += tx_send_failed;
995 adapter->tx_map_failed += tx_map_failed;
996
997 return ret;
998}
999
1000static void ibmvnic_set_multi(struct net_device *netdev)
1001{
1002 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1003 struct netdev_hw_addr *ha;
1004 union ibmvnic_crq crq;
1005
1006 memset(&crq, 0, sizeof(crq));
1007 crq.request_capability.first = IBMVNIC_CRQ_CMD;
1008 crq.request_capability.cmd = REQUEST_CAPABILITY;
1009
1010 if (netdev->flags & IFF_PROMISC) {
1011 if (!adapter->promisc_supported)
1012 return;
1013 } else {
1014 if (netdev->flags & IFF_ALLMULTI) {
1015 /* Accept all multicast */
1016 memset(&crq, 0, sizeof(crq));
1017 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1018 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1019 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
1020 ibmvnic_send_crq(adapter, &crq);
1021 } else if (netdev_mc_empty(netdev)) {
1022 /* Reject all multicast */
1023 memset(&crq, 0, sizeof(crq));
1024 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1025 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1026 crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
1027 ibmvnic_send_crq(adapter, &crq);
1028 } else {
1029 /* Accept one or more multicast(s) */
1030 netdev_for_each_mc_addr(ha, netdev) {
1031 memset(&crq, 0, sizeof(crq));
1032 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1033 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1034 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
1035 ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
1036 ha->addr);
1037 ibmvnic_send_crq(adapter, &crq);
1038 }
1039 }
1040 }
1041}
1042
1043static int ibmvnic_set_mac(struct net_device *netdev, void *p)
1044{
1045 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1046 struct sockaddr *addr = p;
1047 union ibmvnic_crq crq;
1048
1049 if (!is_valid_ether_addr(addr->sa_data))
1050 return -EADDRNOTAVAIL;
1051
1052 memset(&crq, 0, sizeof(crq));
1053 crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
1054 crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
1055 ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
1056 ibmvnic_send_crq(adapter, &crq);
1057 /* netdev->dev_addr is changed in handle_change_mac_rsp function */
1058 return 0;
1059}
1060
Thomas Falcon032c5e82015-12-21 11:26:06 -06001061static void ibmvnic_tx_timeout(struct net_device *dev)
1062{
1063 struct ibmvnic_adapter *adapter = netdev_priv(dev);
1064 int rc;
1065
1066 /* Adapter timed out, resetting it */
1067 release_sub_crqs(adapter);
1068 rc = ibmvnic_reset_crq(adapter);
1069 if (rc)
1070 dev_err(&adapter->vdev->dev, "Adapter timeout, reset failed\n");
1071 else
1072 ibmvnic_send_crq_init(adapter);
1073}
1074
1075static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
1076 struct ibmvnic_rx_buff *rx_buff)
1077{
1078 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
1079
1080 rx_buff->skb = NULL;
1081
1082 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
1083 pool->next_alloc = (pool->next_alloc + 1) % pool->size;
1084
1085 atomic_dec(&pool->available);
1086}
1087
1088static int ibmvnic_poll(struct napi_struct *napi, int budget)
1089{
1090 struct net_device *netdev = napi->dev;
1091 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1092 int scrq_num = (int)(napi - adapter->napi);
1093 int frames_processed = 0;
1094restart_poll:
1095 while (frames_processed < budget) {
1096 struct sk_buff *skb;
1097 struct ibmvnic_rx_buff *rx_buff;
1098 union sub_crq *next;
1099 u32 length;
1100 u16 offset;
1101 u8 flags = 0;
1102
1103 if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
1104 break;
1105 next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
1106 rx_buff =
1107 (struct ibmvnic_rx_buff *)be64_to_cpu(next->
1108 rx_comp.correlator);
1109 /* do error checking */
1110 if (next->rx_comp.rc) {
1111 netdev_err(netdev, "rx error %x\n", next->rx_comp.rc);
1112 /* free the entry */
1113 next->rx_comp.first = 0;
1114 remove_buff_from_pool(adapter, rx_buff);
1115 break;
1116 }
1117
1118 length = be32_to_cpu(next->rx_comp.len);
1119 offset = be16_to_cpu(next->rx_comp.off_frame_data);
1120 flags = next->rx_comp.flags;
1121 skb = rx_buff->skb;
1122 skb_copy_to_linear_data(skb, rx_buff->data + offset,
1123 length);
1124 skb->vlan_tci = be16_to_cpu(next->rx_comp.vlan_tci);
1125 /* free the entry */
1126 next->rx_comp.first = 0;
1127 remove_buff_from_pool(adapter, rx_buff);
1128
1129 skb_put(skb, length);
1130 skb->protocol = eth_type_trans(skb, netdev);
1131
1132 if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
1133 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
1134 skb->ip_summed = CHECKSUM_UNNECESSARY;
1135 }
1136
1137 length = skb->len;
1138 napi_gro_receive(napi, skb); /* send it up */
1139 netdev->stats.rx_packets++;
1140 netdev->stats.rx_bytes += length;
1141 frames_processed++;
1142 }
John Allen498cd8e2016-04-06 11:49:55 -05001143 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001144
1145 if (frames_processed < budget) {
1146 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
Eric Dumazet6ad20162017-01-30 08:22:01 -08001147 napi_complete_done(napi, frames_processed);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001148 if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
1149 napi_reschedule(napi)) {
1150 disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
1151 goto restart_poll;
1152 }
1153 }
1154 return frames_processed;
1155}
1156
1157#ifdef CONFIG_NET_POLL_CONTROLLER
1158static void ibmvnic_netpoll_controller(struct net_device *dev)
1159{
1160 struct ibmvnic_adapter *adapter = netdev_priv(dev);
1161 int i;
1162
1163 replenish_pools(netdev_priv(dev));
1164 for (i = 0; i < adapter->req_rx_queues; i++)
1165 ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
1166 adapter->rx_scrq[i]);
1167}
1168#endif
1169
1170static const struct net_device_ops ibmvnic_netdev_ops = {
1171 .ndo_open = ibmvnic_open,
1172 .ndo_stop = ibmvnic_close,
1173 .ndo_start_xmit = ibmvnic_xmit,
1174 .ndo_set_rx_mode = ibmvnic_set_multi,
1175 .ndo_set_mac_address = ibmvnic_set_mac,
1176 .ndo_validate_addr = eth_validate_addr,
Thomas Falcon032c5e82015-12-21 11:26:06 -06001177 .ndo_tx_timeout = ibmvnic_tx_timeout,
1178#ifdef CONFIG_NET_POLL_CONTROLLER
1179 .ndo_poll_controller = ibmvnic_netpoll_controller,
1180#endif
1181};
1182
1183/* ethtool functions */
1184
Philippe Reynes8a433792017-01-07 22:37:29 +01001185static int ibmvnic_get_link_ksettings(struct net_device *netdev,
1186 struct ethtool_link_ksettings *cmd)
Thomas Falcon032c5e82015-12-21 11:26:06 -06001187{
Philippe Reynes8a433792017-01-07 22:37:29 +01001188 u32 supported, advertising;
1189
1190 supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
Thomas Falcon032c5e82015-12-21 11:26:06 -06001191 SUPPORTED_FIBRE);
Philippe Reynes8a433792017-01-07 22:37:29 +01001192 advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
Thomas Falcon032c5e82015-12-21 11:26:06 -06001193 ADVERTISED_FIBRE);
Philippe Reynes8a433792017-01-07 22:37:29 +01001194 cmd->base.speed = SPEED_1000;
1195 cmd->base.duplex = DUPLEX_FULL;
1196 cmd->base.port = PORT_FIBRE;
1197 cmd->base.phy_address = 0;
1198 cmd->base.autoneg = AUTONEG_ENABLE;
1199
1200 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1201 supported);
1202 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1203 advertising);
1204
Thomas Falcon032c5e82015-12-21 11:26:06 -06001205 return 0;
1206}
1207
1208static void ibmvnic_get_drvinfo(struct net_device *dev,
1209 struct ethtool_drvinfo *info)
1210{
1211 strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
1212 strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
1213}
1214
1215static u32 ibmvnic_get_msglevel(struct net_device *netdev)
1216{
1217 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1218
1219 return adapter->msg_enable;
1220}
1221
1222static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
1223{
1224 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1225
1226 adapter->msg_enable = data;
1227}
1228
1229static u32 ibmvnic_get_link(struct net_device *netdev)
1230{
1231 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1232
1233 /* Don't need to send a query because we request a logical link up at
1234 * init and then we wait for link state indications
1235 */
1236 return adapter->logical_link_state;
1237}
1238
1239static void ibmvnic_get_ringparam(struct net_device *netdev,
1240 struct ethtool_ringparam *ring)
1241{
1242 ring->rx_max_pending = 0;
1243 ring->tx_max_pending = 0;
1244 ring->rx_mini_max_pending = 0;
1245 ring->rx_jumbo_max_pending = 0;
1246 ring->rx_pending = 0;
1247 ring->tx_pending = 0;
1248 ring->rx_mini_pending = 0;
1249 ring->rx_jumbo_pending = 0;
1250}
1251
1252static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1253{
1254 int i;
1255
1256 if (stringset != ETH_SS_STATS)
1257 return;
1258
1259 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
1260 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
1261}
1262
1263static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
1264{
1265 switch (sset) {
1266 case ETH_SS_STATS:
1267 return ARRAY_SIZE(ibmvnic_stats);
1268 default:
1269 return -EOPNOTSUPP;
1270 }
1271}
1272
1273static void ibmvnic_get_ethtool_stats(struct net_device *dev,
1274 struct ethtool_stats *stats, u64 *data)
1275{
1276 struct ibmvnic_adapter *adapter = netdev_priv(dev);
1277 union ibmvnic_crq crq;
1278 int i;
1279
1280 memset(&crq, 0, sizeof(crq));
1281 crq.request_statistics.first = IBMVNIC_CRQ_CMD;
1282 crq.request_statistics.cmd = REQUEST_STATISTICS;
1283 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
1284 crq.request_statistics.len =
1285 cpu_to_be32(sizeof(struct ibmvnic_statistics));
Thomas Falcon032c5e82015-12-21 11:26:06 -06001286
1287 /* Wait for data to be written */
1288 init_completion(&adapter->stats_done);
Nathan Fontenotdb5d0b52017-02-10 13:45:05 -05001289 ibmvnic_send_crq(adapter, &crq);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001290 wait_for_completion(&adapter->stats_done);
1291
1292 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
1293 data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
1294}
1295
1296static const struct ethtool_ops ibmvnic_ethtool_ops = {
Thomas Falcon032c5e82015-12-21 11:26:06 -06001297 .get_drvinfo = ibmvnic_get_drvinfo,
1298 .get_msglevel = ibmvnic_get_msglevel,
1299 .set_msglevel = ibmvnic_set_msglevel,
1300 .get_link = ibmvnic_get_link,
1301 .get_ringparam = ibmvnic_get_ringparam,
1302 .get_strings = ibmvnic_get_strings,
1303 .get_sset_count = ibmvnic_get_sset_count,
1304 .get_ethtool_stats = ibmvnic_get_ethtool_stats,
Philippe Reynes8a433792017-01-07 22:37:29 +01001305 .get_link_ksettings = ibmvnic_get_link_ksettings,
Thomas Falcon032c5e82015-12-21 11:26:06 -06001306};
1307
1308/* Routines for managing CRQs/sCRQs */
1309
1310static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
1311 struct ibmvnic_sub_crq_queue *scrq)
1312{
1313 struct device *dev = &adapter->vdev->dev;
1314 long rc;
1315
1316 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
1317
1318 /* Close the sub-crqs */
1319 do {
1320 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
1321 adapter->vdev->unit_address,
1322 scrq->crq_num);
1323 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
1324
1325 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
1326 DMA_BIDIRECTIONAL);
1327 free_pages((unsigned long)scrq->msgs, 2);
1328 kfree(scrq);
1329}
1330
1331static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
1332 *adapter)
1333{
1334 struct device *dev = &adapter->vdev->dev;
1335 struct ibmvnic_sub_crq_queue *scrq;
1336 int rc;
1337
1338 scrq = kmalloc(sizeof(*scrq), GFP_ATOMIC);
1339 if (!scrq)
1340 return NULL;
1341
Thomas Falcon12608c22016-10-17 15:28:09 -05001342 scrq->msgs = (union sub_crq *)__get_free_pages(GFP_ATOMIC, 2);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001343 memset(scrq->msgs, 0, 4 * PAGE_SIZE);
1344 if (!scrq->msgs) {
1345 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
1346 goto zero_page_failed;
1347 }
1348
1349 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
1350 DMA_BIDIRECTIONAL);
1351 if (dma_mapping_error(dev, scrq->msg_token)) {
1352 dev_warn(dev, "Couldn't map crq queue messages page\n");
1353 goto map_failed;
1354 }
1355
1356 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
1357 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
1358
1359 if (rc == H_RESOURCE)
1360 rc = ibmvnic_reset_crq(adapter);
1361
1362 if (rc == H_CLOSED) {
1363 dev_warn(dev, "Partner adapter not ready, waiting.\n");
1364 } else if (rc) {
1365 dev_warn(dev, "Error %d registering sub-crq\n", rc);
1366 goto reg_failed;
1367 }
1368
Thomas Falcon032c5e82015-12-21 11:26:06 -06001369 scrq->adapter = adapter;
1370 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
1371 scrq->cur = 0;
Thomas Falcon142c0ac2017-03-05 12:18:41 -06001372 atomic_set(&scrq->used, 0);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001373 scrq->rx_skb_top = NULL;
1374 spin_lock_init(&scrq->lock);
1375
1376 netdev_dbg(adapter->netdev,
1377 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
1378 scrq->crq_num, scrq->hw_irq, scrq->irq);
1379
1380 return scrq;
1381
Thomas Falcon032c5e82015-12-21 11:26:06 -06001382reg_failed:
1383 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
1384 DMA_BIDIRECTIONAL);
1385map_failed:
1386 free_pages((unsigned long)scrq->msgs, 2);
1387zero_page_failed:
1388 kfree(scrq);
1389
1390 return NULL;
1391}
1392
1393static void release_sub_crqs(struct ibmvnic_adapter *adapter)
1394{
1395 int i;
1396
1397 if (adapter->tx_scrq) {
Nathan Fontenotb5108882017-03-30 02:49:18 -04001398 for (i = 0; i < adapter->req_tx_queues; i++) {
1399 if (!adapter->tx_scrq[i])
1400 continue;
1401
1402 if (adapter->tx_scrq[i]->irq) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06001403 free_irq(adapter->tx_scrq[i]->irq,
1404 adapter->tx_scrq[i]);
Thomas Falcon88eb98a2016-07-06 15:35:16 -05001405 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
Nathan Fontenotb5108882017-03-30 02:49:18 -04001406 adapter->tx_scrq[i]->irq = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001407 }
Nathan Fontenotb5108882017-03-30 02:49:18 -04001408
1409 release_sub_crq_queue(adapter, adapter->tx_scrq[i]);
1410 }
1411
Nathan Fontenot9501df32017-03-15 23:38:07 -04001412 kfree(adapter->tx_scrq);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001413 adapter->tx_scrq = NULL;
1414 }
1415
1416 if (adapter->rx_scrq) {
Nathan Fontenotb5108882017-03-30 02:49:18 -04001417 for (i = 0; i < adapter->req_rx_queues; i++) {
1418 if (!adapter->rx_scrq[i])
1419 continue;
1420
1421 if (adapter->rx_scrq[i]->irq) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06001422 free_irq(adapter->rx_scrq[i]->irq,
1423 adapter->rx_scrq[i]);
Thomas Falcon88eb98a2016-07-06 15:35:16 -05001424 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
Nathan Fontenotb5108882017-03-30 02:49:18 -04001425 adapter->rx_scrq[i]->irq = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001426 }
Nathan Fontenotb5108882017-03-30 02:49:18 -04001427
1428 release_sub_crq_queue(adapter, adapter->rx_scrq[i]);
1429 }
1430
Nathan Fontenot9501df32017-03-15 23:38:07 -04001431 kfree(adapter->rx_scrq);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001432 adapter->rx_scrq = NULL;
1433 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06001434}
1435
1436static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
1437 struct ibmvnic_sub_crq_queue *scrq)
1438{
1439 struct device *dev = &adapter->vdev->dev;
1440 unsigned long rc;
1441
1442 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
1443 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
1444 if (rc)
1445 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
1446 scrq->hw_irq, rc);
1447 return rc;
1448}
1449
1450static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
1451 struct ibmvnic_sub_crq_queue *scrq)
1452{
1453 struct device *dev = &adapter->vdev->dev;
1454 unsigned long rc;
1455
1456 if (scrq->hw_irq > 0x100000000ULL) {
1457 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
1458 return 1;
1459 }
1460
1461 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
1462 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
1463 if (rc)
1464 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
1465 scrq->hw_irq, rc);
1466 return rc;
1467}
1468
1469static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
1470 struct ibmvnic_sub_crq_queue *scrq)
1471{
1472 struct device *dev = &adapter->vdev->dev;
1473 struct ibmvnic_tx_buff *txbuff;
1474 union sub_crq *next;
1475 int index;
1476 int i, j;
Thomas Falconad7775d2016-04-01 17:20:34 -05001477 u8 first;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001478
1479restart_loop:
1480 while (pending_scrq(adapter, scrq)) {
1481 unsigned int pool = scrq->pool_index;
1482
1483 next = ibmvnic_next_scrq(adapter, scrq);
1484 for (i = 0; i < next->tx_comp.num_comps; i++) {
1485 if (next->tx_comp.rcs[i]) {
1486 dev_err(dev, "tx error %x\n",
1487 next->tx_comp.rcs[i]);
1488 continue;
1489 }
1490 index = be32_to_cpu(next->tx_comp.correlators[i]);
1491 txbuff = &adapter->tx_pool[pool].tx_buff[index];
1492
1493 for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
1494 if (!txbuff->data_dma[j])
1495 continue;
1496
1497 txbuff->data_dma[j] = 0;
1498 txbuff->used_bounce = false;
1499 }
Thomas Falconad7775d2016-04-01 17:20:34 -05001500 /* if sub_crq was sent indirectly */
1501 first = txbuff->indir_arr[0].generic.first;
1502 if (first == IBMVNIC_CRQ_CMD) {
1503 dma_unmap_single(dev, txbuff->indir_dma,
1504 sizeof(txbuff->indir_arr),
1505 DMA_TO_DEVICE);
1506 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06001507
Thomas Falcon142c0ac2017-03-05 12:18:41 -06001508 if (txbuff->last_frag) {
1509 atomic_dec(&scrq->used);
1510
1511 if (atomic_read(&scrq->used) <=
1512 (adapter->req_tx_entries_per_subcrq / 2) &&
1513 netif_subqueue_stopped(adapter->netdev,
1514 txbuff->skb)) {
1515 netif_wake_subqueue(adapter->netdev,
1516 scrq->pool_index);
1517 netdev_dbg(adapter->netdev,
1518 "Started queue %d\n",
1519 scrq->pool_index);
1520 }
1521
Thomas Falcon032c5e82015-12-21 11:26:06 -06001522 dev_kfree_skb_any(txbuff->skb);
Thomas Falcon142c0ac2017-03-05 12:18:41 -06001523 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06001524
1525 adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
1526 producer_index] = index;
1527 adapter->tx_pool[pool].producer_index =
1528 (adapter->tx_pool[pool].producer_index + 1) %
Thomas Falcon068d9f92017-03-05 12:18:42 -06001529 adapter->req_tx_entries_per_subcrq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001530 }
1531 /* remove tx_comp scrq*/
1532 next->tx_comp.first = 0;
1533 }
1534
1535 enable_scrq_irq(adapter, scrq);
1536
1537 if (pending_scrq(adapter, scrq)) {
1538 disable_scrq_irq(adapter, scrq);
1539 goto restart_loop;
1540 }
1541
1542 return 0;
1543}
1544
1545static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
1546{
1547 struct ibmvnic_sub_crq_queue *scrq = instance;
1548 struct ibmvnic_adapter *adapter = scrq->adapter;
1549
1550 disable_scrq_irq(adapter, scrq);
1551 ibmvnic_complete_tx(adapter, scrq);
1552
1553 return IRQ_HANDLED;
1554}
1555
1556static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
1557{
1558 struct ibmvnic_sub_crq_queue *scrq = instance;
1559 struct ibmvnic_adapter *adapter = scrq->adapter;
1560
1561 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
1562 disable_scrq_irq(adapter, scrq);
1563 __napi_schedule(&adapter->napi[scrq->scrq_num]);
1564 }
1565
1566 return IRQ_HANDLED;
1567}
1568
Thomas Falconea22d512016-07-06 15:35:17 -05001569static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
1570{
1571 struct device *dev = &adapter->vdev->dev;
1572 struct ibmvnic_sub_crq_queue *scrq;
1573 int i = 0, j = 0;
1574 int rc = 0;
1575
1576 for (i = 0; i < adapter->req_tx_queues; i++) {
1577 scrq = adapter->tx_scrq[i];
1578 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
1579
Michael Ellerman99c17902016-09-10 19:59:05 +10001580 if (!scrq->irq) {
Thomas Falconea22d512016-07-06 15:35:17 -05001581 rc = -EINVAL;
1582 dev_err(dev, "Error mapping irq\n");
1583 goto req_tx_irq_failed;
1584 }
1585
1586 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
1587 0, "ibmvnic_tx", scrq);
1588
1589 if (rc) {
1590 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
1591 scrq->irq, rc);
1592 irq_dispose_mapping(scrq->irq);
1593 goto req_rx_irq_failed;
1594 }
1595 }
1596
1597 for (i = 0; i < adapter->req_rx_queues; i++) {
1598 scrq = adapter->rx_scrq[i];
1599 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
Michael Ellerman99c17902016-09-10 19:59:05 +10001600 if (!scrq->irq) {
Thomas Falconea22d512016-07-06 15:35:17 -05001601 rc = -EINVAL;
1602 dev_err(dev, "Error mapping irq\n");
1603 goto req_rx_irq_failed;
1604 }
1605 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
1606 0, "ibmvnic_rx", scrq);
1607 if (rc) {
1608 dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
1609 scrq->irq, rc);
1610 irq_dispose_mapping(scrq->irq);
1611 goto req_rx_irq_failed;
1612 }
1613 }
1614 return rc;
1615
1616req_rx_irq_failed:
Thomas Falcon8bf371e2016-10-27 12:28:52 -05001617 for (j = 0; j < i; j++) {
Thomas Falconea22d512016-07-06 15:35:17 -05001618 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
1619 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
Thomas Falcon8bf371e2016-10-27 12:28:52 -05001620 }
Thomas Falconea22d512016-07-06 15:35:17 -05001621 i = adapter->req_tx_queues;
1622req_tx_irq_failed:
Thomas Falcon8bf371e2016-10-27 12:28:52 -05001623 for (j = 0; j < i; j++) {
Thomas Falconea22d512016-07-06 15:35:17 -05001624 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
1625 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
Thomas Falcon8bf371e2016-10-27 12:28:52 -05001626 }
Nathan Fontenotb5108882017-03-30 02:49:18 -04001627 release_sub_crqs(adapter);
Thomas Falconea22d512016-07-06 15:35:17 -05001628 return rc;
1629}
1630
Thomas Falcon032c5e82015-12-21 11:26:06 -06001631static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
1632{
1633 struct device *dev = &adapter->vdev->dev;
1634 struct ibmvnic_sub_crq_queue **allqueues;
1635 int registered_queues = 0;
1636 union ibmvnic_crq crq;
1637 int total_queues;
1638 int more = 0;
Thomas Falconea22d512016-07-06 15:35:17 -05001639 int i;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001640
1641 if (!retry) {
1642 /* Sub-CRQ entries are 32 byte long */
1643 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
1644
1645 if (adapter->min_tx_entries_per_subcrq > entries_page ||
1646 adapter->min_rx_add_entries_per_subcrq > entries_page) {
1647 dev_err(dev, "Fatal, invalid entries per sub-crq\n");
1648 goto allqueues_failed;
1649 }
1650
1651 /* Get the minimum between the queried max and the entries
1652 * that fit in our PAGE_SIZE
1653 */
1654 adapter->req_tx_entries_per_subcrq =
1655 adapter->max_tx_entries_per_subcrq > entries_page ?
1656 entries_page : adapter->max_tx_entries_per_subcrq;
1657 adapter->req_rx_add_entries_per_subcrq =
1658 adapter->max_rx_add_entries_per_subcrq > entries_page ?
1659 entries_page : adapter->max_rx_add_entries_per_subcrq;
1660
John Allen6dbcd8f2016-11-07 14:27:28 -06001661 adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues;
1662 adapter->req_rx_queues = adapter->opt_rx_comp_queues;
John Allen498cd8e2016-04-06 11:49:55 -05001663 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001664
Thomas Falconf39f0d12017-02-14 10:22:59 -06001665 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001666 }
1667
1668 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
1669
1670 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_ATOMIC);
1671 if (!allqueues)
1672 goto allqueues_failed;
1673
1674 for (i = 0; i < total_queues; i++) {
1675 allqueues[i] = init_sub_crq_queue(adapter);
1676 if (!allqueues[i]) {
1677 dev_warn(dev, "Couldn't allocate all sub-crqs\n");
1678 break;
1679 }
1680 registered_queues++;
1681 }
1682
1683 /* Make sure we were able to register the minimum number of queues */
1684 if (registered_queues <
1685 adapter->min_tx_queues + adapter->min_rx_queues) {
1686 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
1687 goto tx_failed;
1688 }
1689
1690 /* Distribute the failed allocated queues*/
1691 for (i = 0; i < total_queues - registered_queues + more ; i++) {
1692 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
1693 switch (i % 3) {
1694 case 0:
1695 if (adapter->req_rx_queues > adapter->min_rx_queues)
1696 adapter->req_rx_queues--;
1697 else
1698 more++;
1699 break;
1700 case 1:
1701 if (adapter->req_tx_queues > adapter->min_tx_queues)
1702 adapter->req_tx_queues--;
1703 else
1704 more++;
1705 break;
1706 }
1707 }
1708
1709 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
1710 sizeof(*adapter->tx_scrq), GFP_ATOMIC);
1711 if (!adapter->tx_scrq)
1712 goto tx_failed;
1713
1714 for (i = 0; i < adapter->req_tx_queues; i++) {
1715 adapter->tx_scrq[i] = allqueues[i];
1716 adapter->tx_scrq[i]->pool_index = i;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001717 }
1718
1719 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
1720 sizeof(*adapter->rx_scrq), GFP_ATOMIC);
1721 if (!adapter->rx_scrq)
1722 goto rx_failed;
1723
1724 for (i = 0; i < adapter->req_rx_queues; i++) {
1725 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
1726 adapter->rx_scrq[i]->scrq_num = i;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001727 }
1728
1729 memset(&crq, 0, sizeof(crq));
1730 crq.request_capability.first = IBMVNIC_CRQ_CMD;
1731 crq.request_capability.cmd = REQUEST_CAPABILITY;
1732
1733 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
Thomas Falconde89e852016-03-01 10:20:09 -06001734 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
Thomas Falcon901e0402017-02-15 12:17:59 -06001735 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001736 ibmvnic_send_crq(adapter, &crq);
1737
1738 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
Thomas Falconde89e852016-03-01 10:20:09 -06001739 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
Thomas Falcon901e0402017-02-15 12:17:59 -06001740 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001741 ibmvnic_send_crq(adapter, &crq);
1742
1743 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
Thomas Falconde89e852016-03-01 10:20:09 -06001744 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
Thomas Falcon901e0402017-02-15 12:17:59 -06001745 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001746 ibmvnic_send_crq(adapter, &crq);
1747
1748 crq.request_capability.capability =
1749 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
1750 crq.request_capability.number =
Thomas Falconde89e852016-03-01 10:20:09 -06001751 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
Thomas Falcon901e0402017-02-15 12:17:59 -06001752 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001753 ibmvnic_send_crq(adapter, &crq);
1754
1755 crq.request_capability.capability =
1756 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
1757 crq.request_capability.number =
Thomas Falconde89e852016-03-01 10:20:09 -06001758 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
Thomas Falcon901e0402017-02-15 12:17:59 -06001759 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001760 ibmvnic_send_crq(adapter, &crq);
1761
1762 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
Thomas Falconde89e852016-03-01 10:20:09 -06001763 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
Thomas Falcon901e0402017-02-15 12:17:59 -06001764 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001765 ibmvnic_send_crq(adapter, &crq);
1766
1767 if (adapter->netdev->flags & IFF_PROMISC) {
1768 if (adapter->promisc_supported) {
1769 crq.request_capability.capability =
1770 cpu_to_be16(PROMISC_REQUESTED);
Thomas Falconde89e852016-03-01 10:20:09 -06001771 crq.request_capability.number = cpu_to_be64(1);
Thomas Falcon901e0402017-02-15 12:17:59 -06001772 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001773 ibmvnic_send_crq(adapter, &crq);
1774 }
1775 } else {
1776 crq.request_capability.capability =
1777 cpu_to_be16(PROMISC_REQUESTED);
Thomas Falconde89e852016-03-01 10:20:09 -06001778 crq.request_capability.number = cpu_to_be64(0);
Thomas Falcon901e0402017-02-15 12:17:59 -06001779 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001780 ibmvnic_send_crq(adapter, &crq);
1781 }
1782
1783 kfree(allqueues);
1784
1785 return;
1786
Thomas Falcon032c5e82015-12-21 11:26:06 -06001787rx_failed:
1788 kfree(adapter->tx_scrq);
1789 adapter->tx_scrq = NULL;
1790tx_failed:
1791 for (i = 0; i < registered_queues; i++)
1792 release_sub_crq_queue(adapter, allqueues[i]);
1793 kfree(allqueues);
1794allqueues_failed:
1795 ibmvnic_remove(adapter->vdev);
1796}
1797
1798static int pending_scrq(struct ibmvnic_adapter *adapter,
1799 struct ibmvnic_sub_crq_queue *scrq)
1800{
1801 union sub_crq *entry = &scrq->msgs[scrq->cur];
1802
1803 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP || adapter->closing)
1804 return 1;
1805 else
1806 return 0;
1807}
1808
1809static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
1810 struct ibmvnic_sub_crq_queue *scrq)
1811{
1812 union sub_crq *entry;
1813 unsigned long flags;
1814
1815 spin_lock_irqsave(&scrq->lock, flags);
1816 entry = &scrq->msgs[scrq->cur];
1817 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
1818 if (++scrq->cur == scrq->size)
1819 scrq->cur = 0;
1820 } else {
1821 entry = NULL;
1822 }
1823 spin_unlock_irqrestore(&scrq->lock, flags);
1824
1825 return entry;
1826}
1827
1828static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
1829{
1830 struct ibmvnic_crq_queue *queue = &adapter->crq;
1831 union ibmvnic_crq *crq;
1832
1833 crq = &queue->msgs[queue->cur];
1834 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
1835 if (++queue->cur == queue->size)
1836 queue->cur = 0;
1837 } else {
1838 crq = NULL;
1839 }
1840
1841 return crq;
1842}
1843
1844static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
1845 union sub_crq *sub_crq)
1846{
1847 unsigned int ua = adapter->vdev->unit_address;
1848 struct device *dev = &adapter->vdev->dev;
1849 u64 *u64_crq = (u64 *)sub_crq;
1850 int rc;
1851
1852 netdev_dbg(adapter->netdev,
1853 "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
1854 (unsigned long int)cpu_to_be64(remote_handle),
1855 (unsigned long int)cpu_to_be64(u64_crq[0]),
1856 (unsigned long int)cpu_to_be64(u64_crq[1]),
1857 (unsigned long int)cpu_to_be64(u64_crq[2]),
1858 (unsigned long int)cpu_to_be64(u64_crq[3]));
1859
1860 /* Make sure the hypervisor sees the complete request */
1861 mb();
1862
1863 rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
1864 cpu_to_be64(remote_handle),
1865 cpu_to_be64(u64_crq[0]),
1866 cpu_to_be64(u64_crq[1]),
1867 cpu_to_be64(u64_crq[2]),
1868 cpu_to_be64(u64_crq[3]));
1869
1870 if (rc) {
1871 if (rc == H_CLOSED)
1872 dev_warn(dev, "CRQ Queue closed\n");
1873 dev_err(dev, "Send error (rc=%d)\n", rc);
1874 }
1875
1876 return rc;
1877}
1878
Thomas Falconad7775d2016-04-01 17:20:34 -05001879static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
1880 u64 remote_handle, u64 ioba, u64 num_entries)
1881{
1882 unsigned int ua = adapter->vdev->unit_address;
1883 struct device *dev = &adapter->vdev->dev;
1884 int rc;
1885
1886 /* Make sure the hypervisor sees the complete request */
1887 mb();
1888 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
1889 cpu_to_be64(remote_handle),
1890 ioba, num_entries);
1891
1892 if (rc) {
1893 if (rc == H_CLOSED)
1894 dev_warn(dev, "CRQ Queue closed\n");
1895 dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
1896 }
1897
1898 return rc;
1899}
1900
Thomas Falcon032c5e82015-12-21 11:26:06 -06001901static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
1902 union ibmvnic_crq *crq)
1903{
1904 unsigned int ua = adapter->vdev->unit_address;
1905 struct device *dev = &adapter->vdev->dev;
1906 u64 *u64_crq = (u64 *)crq;
1907 int rc;
1908
1909 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
1910 (unsigned long int)cpu_to_be64(u64_crq[0]),
1911 (unsigned long int)cpu_to_be64(u64_crq[1]));
1912
1913 /* Make sure the hypervisor sees the complete request */
1914 mb();
1915
1916 rc = plpar_hcall_norets(H_SEND_CRQ, ua,
1917 cpu_to_be64(u64_crq[0]),
1918 cpu_to_be64(u64_crq[1]));
1919
1920 if (rc) {
1921 if (rc == H_CLOSED)
1922 dev_warn(dev, "CRQ Queue closed\n");
1923 dev_warn(dev, "Send error (rc=%d)\n", rc);
1924 }
1925
1926 return rc;
1927}
1928
1929static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
1930{
1931 union ibmvnic_crq crq;
1932
1933 memset(&crq, 0, sizeof(crq));
1934 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
1935 crq.generic.cmd = IBMVNIC_CRQ_INIT;
1936 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
1937
1938 return ibmvnic_send_crq(adapter, &crq);
1939}
1940
1941static int ibmvnic_send_crq_init_complete(struct ibmvnic_adapter *adapter)
1942{
1943 union ibmvnic_crq crq;
1944
1945 memset(&crq, 0, sizeof(crq));
1946 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
1947 crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
1948 netdev_dbg(adapter->netdev, "Sending CRQ init complete\n");
1949
1950 return ibmvnic_send_crq(adapter, &crq);
1951}
1952
1953static int send_version_xchg(struct ibmvnic_adapter *adapter)
1954{
1955 union ibmvnic_crq crq;
1956
1957 memset(&crq, 0, sizeof(crq));
1958 crq.version_exchange.first = IBMVNIC_CRQ_CMD;
1959 crq.version_exchange.cmd = VERSION_EXCHANGE;
1960 crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
1961
1962 return ibmvnic_send_crq(adapter, &crq);
1963}
1964
1965static void send_login(struct ibmvnic_adapter *adapter)
1966{
1967 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
1968 struct ibmvnic_login_buffer *login_buffer;
1969 struct ibmvnic_inflight_cmd *inflight_cmd;
1970 struct device *dev = &adapter->vdev->dev;
1971 dma_addr_t rsp_buffer_token;
1972 dma_addr_t buffer_token;
1973 size_t rsp_buffer_size;
1974 union ibmvnic_crq crq;
1975 unsigned long flags;
1976 size_t buffer_size;
1977 __be64 *tx_list_p;
1978 __be64 *rx_list_p;
1979 int i;
1980
1981 buffer_size =
1982 sizeof(struct ibmvnic_login_buffer) +
1983 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues);
1984
1985 login_buffer = kmalloc(buffer_size, GFP_ATOMIC);
1986 if (!login_buffer)
1987 goto buf_alloc_failed;
1988
1989 buffer_token = dma_map_single(dev, login_buffer, buffer_size,
1990 DMA_TO_DEVICE);
1991 if (dma_mapping_error(dev, buffer_token)) {
1992 dev_err(dev, "Couldn't map login buffer\n");
1993 goto buf_map_failed;
1994 }
1995
John Allen498cd8e2016-04-06 11:49:55 -05001996 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
1997 sizeof(u64) * adapter->req_tx_queues +
1998 sizeof(u64) * adapter->req_rx_queues +
1999 sizeof(u64) * adapter->req_rx_queues +
2000 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002001
2002 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
2003 if (!login_rsp_buffer)
2004 goto buf_rsp_alloc_failed;
2005
2006 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
2007 rsp_buffer_size, DMA_FROM_DEVICE);
2008 if (dma_mapping_error(dev, rsp_buffer_token)) {
2009 dev_err(dev, "Couldn't map login rsp buffer\n");
2010 goto buf_rsp_map_failed;
2011 }
2012 inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
2013 if (!inflight_cmd) {
2014 dev_err(dev, "Couldn't allocate inflight_cmd\n");
2015 goto inflight_alloc_failed;
2016 }
2017 adapter->login_buf = login_buffer;
2018 adapter->login_buf_token = buffer_token;
2019 adapter->login_buf_sz = buffer_size;
2020 adapter->login_rsp_buf = login_rsp_buffer;
2021 adapter->login_rsp_buf_token = rsp_buffer_token;
2022 adapter->login_rsp_buf_sz = rsp_buffer_size;
2023
2024 login_buffer->len = cpu_to_be32(buffer_size);
2025 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
2026 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
2027 login_buffer->off_txcomp_subcrqs =
2028 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
2029 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
2030 login_buffer->off_rxcomp_subcrqs =
2031 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
2032 sizeof(u64) * adapter->req_tx_queues);
2033 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
2034 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
2035
2036 tx_list_p = (__be64 *)((char *)login_buffer +
2037 sizeof(struct ibmvnic_login_buffer));
2038 rx_list_p = (__be64 *)((char *)login_buffer +
2039 sizeof(struct ibmvnic_login_buffer) +
2040 sizeof(u64) * adapter->req_tx_queues);
2041
2042 for (i = 0; i < adapter->req_tx_queues; i++) {
2043 if (adapter->tx_scrq[i]) {
2044 tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
2045 crq_num);
2046 }
2047 }
2048
2049 for (i = 0; i < adapter->req_rx_queues; i++) {
2050 if (adapter->rx_scrq[i]) {
2051 rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
2052 crq_num);
2053 }
2054 }
2055
2056 netdev_dbg(adapter->netdev, "Login Buffer:\n");
2057 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
2058 netdev_dbg(adapter->netdev, "%016lx\n",
2059 ((unsigned long int *)(adapter->login_buf))[i]);
2060 }
2061
2062 memset(&crq, 0, sizeof(crq));
2063 crq.login.first = IBMVNIC_CRQ_CMD;
2064 crq.login.cmd = LOGIN;
2065 crq.login.ioba = cpu_to_be32(buffer_token);
2066 crq.login.len = cpu_to_be32(buffer_size);
2067
2068 memcpy(&inflight_cmd->crq, &crq, sizeof(crq));
2069
2070 spin_lock_irqsave(&adapter->inflight_lock, flags);
2071 list_add_tail(&inflight_cmd->list, &adapter->inflight);
2072 spin_unlock_irqrestore(&adapter->inflight_lock, flags);
2073
2074 ibmvnic_send_crq(adapter, &crq);
2075
2076 return;
2077
2078inflight_alloc_failed:
2079 dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size,
2080 DMA_FROM_DEVICE);
2081buf_rsp_map_failed:
2082 kfree(login_rsp_buffer);
2083buf_rsp_alloc_failed:
2084 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
2085buf_map_failed:
2086 kfree(login_buffer);
2087buf_alloc_failed:
2088 return;
2089}
2090
2091static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
2092 u32 len, u8 map_id)
2093{
2094 union ibmvnic_crq crq;
2095
2096 memset(&crq, 0, sizeof(crq));
2097 crq.request_map.first = IBMVNIC_CRQ_CMD;
2098 crq.request_map.cmd = REQUEST_MAP;
2099 crq.request_map.map_id = map_id;
2100 crq.request_map.ioba = cpu_to_be32(addr);
2101 crq.request_map.len = cpu_to_be32(len);
2102 ibmvnic_send_crq(adapter, &crq);
2103}
2104
2105static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
2106{
2107 union ibmvnic_crq crq;
2108
2109 memset(&crq, 0, sizeof(crq));
2110 crq.request_unmap.first = IBMVNIC_CRQ_CMD;
2111 crq.request_unmap.cmd = REQUEST_UNMAP;
2112 crq.request_unmap.map_id = map_id;
2113 ibmvnic_send_crq(adapter, &crq);
2114}
2115
2116static void send_map_query(struct ibmvnic_adapter *adapter)
2117{
2118 union ibmvnic_crq crq;
2119
2120 memset(&crq, 0, sizeof(crq));
2121 crq.query_map.first = IBMVNIC_CRQ_CMD;
2122 crq.query_map.cmd = QUERY_MAP;
2123 ibmvnic_send_crq(adapter, &crq);
2124}
2125
2126/* Send a series of CRQs requesting various capabilities of the VNIC server */
2127static void send_cap_queries(struct ibmvnic_adapter *adapter)
2128{
2129 union ibmvnic_crq crq;
2130
Thomas Falcon901e0402017-02-15 12:17:59 -06002131 atomic_set(&adapter->running_cap_crqs, 0);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002132 memset(&crq, 0, sizeof(crq));
2133 crq.query_capability.first = IBMVNIC_CRQ_CMD;
2134 crq.query_capability.cmd = QUERY_CAPABILITY;
2135
2136 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06002137 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002138 ibmvnic_send_crq(adapter, &crq);
2139
2140 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06002141 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002142 ibmvnic_send_crq(adapter, &crq);
2143
2144 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06002145 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002146 ibmvnic_send_crq(adapter, &crq);
2147
2148 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06002149 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002150 ibmvnic_send_crq(adapter, &crq);
2151
2152 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06002153 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002154 ibmvnic_send_crq(adapter, &crq);
2155
2156 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06002157 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002158 ibmvnic_send_crq(adapter, &crq);
2159
2160 crq.query_capability.capability =
2161 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06002162 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002163 ibmvnic_send_crq(adapter, &crq);
2164
2165 crq.query_capability.capability =
2166 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06002167 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002168 ibmvnic_send_crq(adapter, &crq);
2169
2170 crq.query_capability.capability =
2171 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06002172 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002173 ibmvnic_send_crq(adapter, &crq);
2174
2175 crq.query_capability.capability =
2176 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06002177 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002178 ibmvnic_send_crq(adapter, &crq);
2179
2180 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
Thomas Falcon901e0402017-02-15 12:17:59 -06002181 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002182 ibmvnic_send_crq(adapter, &crq);
2183
2184 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
Thomas Falcon901e0402017-02-15 12:17:59 -06002185 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002186 ibmvnic_send_crq(adapter, &crq);
2187
2188 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
Thomas Falcon901e0402017-02-15 12:17:59 -06002189 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002190 ibmvnic_send_crq(adapter, &crq);
2191
2192 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
Thomas Falcon901e0402017-02-15 12:17:59 -06002193 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002194 ibmvnic_send_crq(adapter, &crq);
2195
2196 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
Thomas Falcon901e0402017-02-15 12:17:59 -06002197 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002198 ibmvnic_send_crq(adapter, &crq);
2199
2200 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
Thomas Falcon901e0402017-02-15 12:17:59 -06002201 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002202 ibmvnic_send_crq(adapter, &crq);
2203
2204 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
Thomas Falcon901e0402017-02-15 12:17:59 -06002205 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002206 ibmvnic_send_crq(adapter, &crq);
2207
2208 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
Thomas Falcon901e0402017-02-15 12:17:59 -06002209 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002210 ibmvnic_send_crq(adapter, &crq);
2211
2212 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06002213 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002214 ibmvnic_send_crq(adapter, &crq);
2215
2216 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06002217 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002218 ibmvnic_send_crq(adapter, &crq);
2219
2220 crq.query_capability.capability =
2221 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
Thomas Falcon901e0402017-02-15 12:17:59 -06002222 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002223 ibmvnic_send_crq(adapter, &crq);
2224
2225 crq.query_capability.capability =
2226 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06002227 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002228 ibmvnic_send_crq(adapter, &crq);
2229
2230 crq.query_capability.capability =
2231 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06002232 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002233 ibmvnic_send_crq(adapter, &crq);
2234
2235 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06002236 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002237 ibmvnic_send_crq(adapter, &crq);
2238}
2239
2240static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
2241{
2242 struct device *dev = &adapter->vdev->dev;
2243 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
2244 union ibmvnic_crq crq;
2245 int i;
2246
2247 dma_unmap_single(dev, adapter->ip_offload_tok,
2248 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
2249
2250 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
2251 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
2252 netdev_dbg(adapter->netdev, "%016lx\n",
2253 ((unsigned long int *)(buf))[i]);
2254
2255 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
2256 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
2257 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
2258 buf->tcp_ipv4_chksum);
2259 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
2260 buf->tcp_ipv6_chksum);
2261 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
2262 buf->udp_ipv4_chksum);
2263 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
2264 buf->udp_ipv6_chksum);
2265 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
2266 buf->large_tx_ipv4);
2267 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
2268 buf->large_tx_ipv6);
2269 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
2270 buf->large_rx_ipv4);
2271 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
2272 buf->large_rx_ipv6);
2273 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
2274 buf->max_ipv4_header_size);
2275 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
2276 buf->max_ipv6_header_size);
2277 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
2278 buf->max_tcp_header_size);
2279 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
2280 buf->max_udp_header_size);
2281 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
2282 buf->max_large_tx_size);
2283 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
2284 buf->max_large_rx_size);
2285 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
2286 buf->ipv6_extension_header);
2287 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
2288 buf->tcp_pseudosum_req);
2289 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
2290 buf->num_ipv6_ext_headers);
2291 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
2292 buf->off_ipv6_ext_headers);
2293
2294 adapter->ip_offload_ctrl_tok =
2295 dma_map_single(dev, &adapter->ip_offload_ctrl,
2296 sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
2297
2298 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
2299 dev_err(dev, "Couldn't map ip offload control buffer\n");
2300 return;
2301 }
2302
2303 adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
2304 adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
2305 adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
2306 adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
2307 adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
2308
2309 /* large_tx/rx disabled for now, additional features needed */
2310 adapter->ip_offload_ctrl.large_tx_ipv4 = 0;
2311 adapter->ip_offload_ctrl.large_tx_ipv6 = 0;
2312 adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
2313 adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
2314
2315 adapter->netdev->features = NETIF_F_GSO;
2316
2317 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
2318 adapter->netdev->features |= NETIF_F_IP_CSUM;
2319
2320 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
2321 adapter->netdev->features |= NETIF_F_IPV6_CSUM;
2322
Thomas Falcon9be02cd2016-04-01 17:20:35 -05002323 if ((adapter->netdev->features &
2324 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
2325 adapter->netdev->features |= NETIF_F_RXCSUM;
2326
Thomas Falcon032c5e82015-12-21 11:26:06 -06002327 memset(&crq, 0, sizeof(crq));
2328 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
2329 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
2330 crq.control_ip_offload.len =
2331 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
2332 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
2333 ibmvnic_send_crq(adapter, &crq);
2334}
2335
2336static void handle_error_info_rsp(union ibmvnic_crq *crq,
2337 struct ibmvnic_adapter *adapter)
2338{
2339 struct device *dev = &adapter->vdev->dev;
Wei Yongjun96183182016-06-27 20:48:53 +08002340 struct ibmvnic_error_buff *error_buff, *tmp;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002341 unsigned long flags;
2342 bool found = false;
2343 int i;
2344
2345 if (!crq->request_error_rsp.rc.code) {
2346 dev_info(dev, "Request Error Rsp returned with rc=%x\n",
2347 crq->request_error_rsp.rc.code);
2348 return;
2349 }
2350
2351 spin_lock_irqsave(&adapter->error_list_lock, flags);
Wei Yongjun96183182016-06-27 20:48:53 +08002352 list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
Thomas Falcon032c5e82015-12-21 11:26:06 -06002353 if (error_buff->error_id == crq->request_error_rsp.error_id) {
2354 found = true;
2355 list_del(&error_buff->list);
2356 break;
2357 }
2358 spin_unlock_irqrestore(&adapter->error_list_lock, flags);
2359
2360 if (!found) {
2361 dev_err(dev, "Couldn't find error id %x\n",
Thomas Falcon75224c92017-02-15 10:33:33 -06002362 be32_to_cpu(crq->request_error_rsp.error_id));
Thomas Falcon032c5e82015-12-21 11:26:06 -06002363 return;
2364 }
2365
2366 dev_err(dev, "Detailed info for error id %x:",
Thomas Falcon75224c92017-02-15 10:33:33 -06002367 be32_to_cpu(crq->request_error_rsp.error_id));
Thomas Falcon032c5e82015-12-21 11:26:06 -06002368
2369 for (i = 0; i < error_buff->len; i++) {
2370 pr_cont("%02x", (int)error_buff->buff[i]);
2371 if (i % 8 == 7)
2372 pr_cont(" ");
2373 }
2374 pr_cont("\n");
2375
2376 dma_unmap_single(dev, error_buff->dma, error_buff->len,
2377 DMA_FROM_DEVICE);
2378 kfree(error_buff->buff);
2379 kfree(error_buff);
2380}
2381
Thomas Falcon032c5e82015-12-21 11:26:06 -06002382static void handle_error_indication(union ibmvnic_crq *crq,
2383 struct ibmvnic_adapter *adapter)
2384{
2385 int detail_len = be32_to_cpu(crq->error_indication.detail_error_sz);
2386 struct ibmvnic_inflight_cmd *inflight_cmd;
2387 struct device *dev = &adapter->vdev->dev;
2388 struct ibmvnic_error_buff *error_buff;
2389 union ibmvnic_crq new_crq;
2390 unsigned long flags;
2391
2392 dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
2393 crq->error_indication.
2394 flags & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
Thomas Falcon75224c92017-02-15 10:33:33 -06002395 be32_to_cpu(crq->error_indication.error_id),
2396 be16_to_cpu(crq->error_indication.error_cause));
Thomas Falcon032c5e82015-12-21 11:26:06 -06002397
2398 error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
2399 if (!error_buff)
2400 return;
2401
2402 error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
2403 if (!error_buff->buff) {
2404 kfree(error_buff);
2405 return;
2406 }
2407
2408 error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
2409 DMA_FROM_DEVICE);
2410 if (dma_mapping_error(dev, error_buff->dma)) {
2411 if (!firmware_has_feature(FW_FEATURE_CMO))
2412 dev_err(dev, "Couldn't map error buffer\n");
2413 kfree(error_buff->buff);
2414 kfree(error_buff);
2415 return;
2416 }
2417
2418 inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
2419 if (!inflight_cmd) {
2420 dma_unmap_single(dev, error_buff->dma, detail_len,
2421 DMA_FROM_DEVICE);
2422 kfree(error_buff->buff);
2423 kfree(error_buff);
2424 return;
2425 }
2426
2427 error_buff->len = detail_len;
2428 error_buff->error_id = crq->error_indication.error_id;
2429
2430 spin_lock_irqsave(&adapter->error_list_lock, flags);
2431 list_add_tail(&error_buff->list, &adapter->errors);
2432 spin_unlock_irqrestore(&adapter->error_list_lock, flags);
2433
2434 memset(&new_crq, 0, sizeof(new_crq));
2435 new_crq.request_error_info.first = IBMVNIC_CRQ_CMD;
2436 new_crq.request_error_info.cmd = REQUEST_ERROR_INFO;
2437 new_crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
2438 new_crq.request_error_info.len = cpu_to_be32(detail_len);
2439 new_crq.request_error_info.error_id = crq->error_indication.error_id;
2440
2441 memcpy(&inflight_cmd->crq, &crq, sizeof(crq));
2442
2443 spin_lock_irqsave(&adapter->inflight_lock, flags);
2444 list_add_tail(&inflight_cmd->list, &adapter->inflight);
2445 spin_unlock_irqrestore(&adapter->inflight_lock, flags);
2446
2447 ibmvnic_send_crq(adapter, &new_crq);
2448}
2449
2450static void handle_change_mac_rsp(union ibmvnic_crq *crq,
2451 struct ibmvnic_adapter *adapter)
2452{
2453 struct net_device *netdev = adapter->netdev;
2454 struct device *dev = &adapter->vdev->dev;
2455 long rc;
2456
2457 rc = crq->change_mac_addr_rsp.rc.code;
2458 if (rc) {
2459 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
2460 return;
2461 }
2462 memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
2463 ETH_ALEN);
2464}
2465
2466static void handle_request_cap_rsp(union ibmvnic_crq *crq,
2467 struct ibmvnic_adapter *adapter)
2468{
2469 struct device *dev = &adapter->vdev->dev;
2470 u64 *req_value;
2471 char *name;
2472
Thomas Falcon901e0402017-02-15 12:17:59 -06002473 atomic_dec(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002474 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
2475 case REQ_TX_QUEUES:
2476 req_value = &adapter->req_tx_queues;
2477 name = "tx";
2478 break;
2479 case REQ_RX_QUEUES:
2480 req_value = &adapter->req_rx_queues;
2481 name = "rx";
2482 break;
2483 case REQ_RX_ADD_QUEUES:
2484 req_value = &adapter->req_rx_add_queues;
2485 name = "rx_add";
2486 break;
2487 case REQ_TX_ENTRIES_PER_SUBCRQ:
2488 req_value = &adapter->req_tx_entries_per_subcrq;
2489 name = "tx_entries_per_subcrq";
2490 break;
2491 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
2492 req_value = &adapter->req_rx_add_entries_per_subcrq;
2493 name = "rx_add_entries_per_subcrq";
2494 break;
2495 case REQ_MTU:
2496 req_value = &adapter->req_mtu;
2497 name = "mtu";
2498 break;
2499 case PROMISC_REQUESTED:
2500 req_value = &adapter->promisc;
2501 name = "promisc";
2502 break;
2503 default:
2504 dev_err(dev, "Got invalid cap request rsp %d\n",
2505 crq->request_capability.capability);
2506 return;
2507 }
2508
2509 switch (crq->request_capability_rsp.rc.code) {
2510 case SUCCESS:
2511 break;
2512 case PARTIALSUCCESS:
2513 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
2514 *req_value,
Thomas Falcon28f4d162017-02-15 10:32:11 -06002515 (long int)be64_to_cpu(crq->request_capability_rsp.
Thomas Falcon032c5e82015-12-21 11:26:06 -06002516 number), name);
Nathan Fontenotb5108882017-03-30 02:49:18 -04002517 release_sub_crqs(adapter);
Thomas Falcon28f4d162017-02-15 10:32:11 -06002518 *req_value = be64_to_cpu(crq->request_capability_rsp.number);
Thomas Falconea22d512016-07-06 15:35:17 -05002519 init_sub_crqs(adapter, 1);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002520 return;
2521 default:
2522 dev_err(dev, "Error %d in request cap rsp\n",
2523 crq->request_capability_rsp.rc.code);
2524 return;
2525 }
2526
2527 /* Done receiving requested capabilities, query IP offload support */
Thomas Falcon901e0402017-02-15 12:17:59 -06002528 if (atomic_read(&adapter->running_cap_crqs) == 0) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06002529 union ibmvnic_crq newcrq;
2530 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
2531 struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
2532 &adapter->ip_offload_buf;
2533
Thomas Falcon249168a2017-02-15 12:18:00 -06002534 adapter->wait_capability = false;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002535 adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
2536 buf_sz,
2537 DMA_FROM_DEVICE);
2538
2539 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
2540 if (!firmware_has_feature(FW_FEATURE_CMO))
2541 dev_err(dev, "Couldn't map offload buffer\n");
2542 return;
2543 }
2544
2545 memset(&newcrq, 0, sizeof(newcrq));
2546 newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
2547 newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
2548 newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
2549 newcrq.query_ip_offload.ioba =
2550 cpu_to_be32(adapter->ip_offload_tok);
2551
2552 ibmvnic_send_crq(adapter, &newcrq);
2553 }
2554}
2555
2556static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
2557 struct ibmvnic_adapter *adapter)
2558{
2559 struct device *dev = &adapter->vdev->dev;
2560 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
2561 struct ibmvnic_login_buffer *login = adapter->login_buf;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002562 int i;
2563
2564 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
2565 DMA_BIDIRECTIONAL);
2566 dma_unmap_single(dev, adapter->login_rsp_buf_token,
2567 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);
2568
John Allen498cd8e2016-04-06 11:49:55 -05002569 /* If the number of queues requested can't be allocated by the
2570 * server, the login response will return with code 1. We will need
2571 * to resend the login buffer with fewer queues requested.
2572 */
2573 if (login_rsp_crq->generic.rc.code) {
2574 adapter->renegotiate = true;
2575 complete(&adapter->init_done);
2576 return 0;
2577 }
2578
Thomas Falcon032c5e82015-12-21 11:26:06 -06002579 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
2580 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
2581 netdev_dbg(adapter->netdev, "%016lx\n",
2582 ((unsigned long int *)(adapter->login_rsp_buf))[i]);
2583 }
2584
2585 /* Sanity checks */
2586 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
2587 (be32_to_cpu(login->num_rxcomp_subcrqs) *
2588 adapter->req_rx_add_queues !=
2589 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
2590 dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
2591 ibmvnic_remove(adapter->vdev);
2592 return -EIO;
2593 }
2594 complete(&adapter->init_done);
2595
Thomas Falcon032c5e82015-12-21 11:26:06 -06002596 return 0;
2597}
2598
2599static void handle_request_map_rsp(union ibmvnic_crq *crq,
2600 struct ibmvnic_adapter *adapter)
2601{
2602 struct device *dev = &adapter->vdev->dev;
2603 u8 map_id = crq->request_map_rsp.map_id;
2604 int tx_subcrqs;
2605 int rx_subcrqs;
2606 long rc;
2607 int i;
2608
2609 tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
2610 rx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
2611
2612 rc = crq->request_map_rsp.rc.code;
2613 if (rc) {
2614 dev_err(dev, "Error %ld in REQUEST_MAP_RSP\n", rc);
2615 adapter->map_id--;
2616 /* need to find and zero tx/rx_pool map_id */
2617 for (i = 0; i < tx_subcrqs; i++) {
2618 if (adapter->tx_pool[i].long_term_buff.map_id == map_id)
2619 adapter->tx_pool[i].long_term_buff.map_id = 0;
2620 }
2621 for (i = 0; i < rx_subcrqs; i++) {
2622 if (adapter->rx_pool[i].long_term_buff.map_id == map_id)
2623 adapter->rx_pool[i].long_term_buff.map_id = 0;
2624 }
2625 }
2626 complete(&adapter->fw_done);
2627}
2628
2629static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
2630 struct ibmvnic_adapter *adapter)
2631{
2632 struct device *dev = &adapter->vdev->dev;
2633 long rc;
2634
2635 rc = crq->request_unmap_rsp.rc.code;
2636 if (rc)
2637 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
2638}
2639
2640static void handle_query_map_rsp(union ibmvnic_crq *crq,
2641 struct ibmvnic_adapter *adapter)
2642{
2643 struct net_device *netdev = adapter->netdev;
2644 struct device *dev = &adapter->vdev->dev;
2645 long rc;
2646
2647 rc = crq->query_map_rsp.rc.code;
2648 if (rc) {
2649 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
2650 return;
2651 }
2652 netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
2653 crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
2654 crq->query_map_rsp.free_pages);
2655}
2656
2657static void handle_query_cap_rsp(union ibmvnic_crq *crq,
2658 struct ibmvnic_adapter *adapter)
2659{
2660 struct net_device *netdev = adapter->netdev;
2661 struct device *dev = &adapter->vdev->dev;
2662 long rc;
2663
Thomas Falcon901e0402017-02-15 12:17:59 -06002664 atomic_dec(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002665 netdev_dbg(netdev, "Outstanding queries: %d\n",
Thomas Falcon901e0402017-02-15 12:17:59 -06002666 atomic_read(&adapter->running_cap_crqs));
Thomas Falcon032c5e82015-12-21 11:26:06 -06002667 rc = crq->query_capability.rc.code;
2668 if (rc) {
2669 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
2670 goto out;
2671 }
2672
2673 switch (be16_to_cpu(crq->query_capability.capability)) {
2674 case MIN_TX_QUEUES:
2675 adapter->min_tx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06002676 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002677 netdev_dbg(netdev, "min_tx_queues = %lld\n",
2678 adapter->min_tx_queues);
2679 break;
2680 case MIN_RX_QUEUES:
2681 adapter->min_rx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06002682 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002683 netdev_dbg(netdev, "min_rx_queues = %lld\n",
2684 adapter->min_rx_queues);
2685 break;
2686 case MIN_RX_ADD_QUEUES:
2687 adapter->min_rx_add_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06002688 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002689 netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
2690 adapter->min_rx_add_queues);
2691 break;
2692 case MAX_TX_QUEUES:
2693 adapter->max_tx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06002694 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002695 netdev_dbg(netdev, "max_tx_queues = %lld\n",
2696 adapter->max_tx_queues);
2697 break;
2698 case MAX_RX_QUEUES:
2699 adapter->max_rx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06002700 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002701 netdev_dbg(netdev, "max_rx_queues = %lld\n",
2702 adapter->max_rx_queues);
2703 break;
2704 case MAX_RX_ADD_QUEUES:
2705 adapter->max_rx_add_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06002706 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002707 netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
2708 adapter->max_rx_add_queues);
2709 break;
2710 case MIN_TX_ENTRIES_PER_SUBCRQ:
2711 adapter->min_tx_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06002712 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002713 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
2714 adapter->min_tx_entries_per_subcrq);
2715 break;
2716 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
2717 adapter->min_rx_add_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06002718 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002719 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
2720 adapter->min_rx_add_entries_per_subcrq);
2721 break;
2722 case MAX_TX_ENTRIES_PER_SUBCRQ:
2723 adapter->max_tx_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06002724 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002725 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
2726 adapter->max_tx_entries_per_subcrq);
2727 break;
2728 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
2729 adapter->max_rx_add_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06002730 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002731 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
2732 adapter->max_rx_add_entries_per_subcrq);
2733 break;
2734 case TCP_IP_OFFLOAD:
2735 adapter->tcp_ip_offload =
Thomas Falconde89e852016-03-01 10:20:09 -06002736 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002737 netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
2738 adapter->tcp_ip_offload);
2739 break;
2740 case PROMISC_SUPPORTED:
2741 adapter->promisc_supported =
Thomas Falconde89e852016-03-01 10:20:09 -06002742 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002743 netdev_dbg(netdev, "promisc_supported = %lld\n",
2744 adapter->promisc_supported);
2745 break;
2746 case MIN_MTU:
Thomas Falconde89e852016-03-01 10:20:09 -06002747 adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
Thomas Falconf39f0d12017-02-14 10:22:59 -06002748 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002749 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
2750 break;
2751 case MAX_MTU:
Thomas Falconde89e852016-03-01 10:20:09 -06002752 adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
Thomas Falconf39f0d12017-02-14 10:22:59 -06002753 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002754 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
2755 break;
2756 case MAX_MULTICAST_FILTERS:
2757 adapter->max_multicast_filters =
Thomas Falconde89e852016-03-01 10:20:09 -06002758 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002759 netdev_dbg(netdev, "max_multicast_filters = %lld\n",
2760 adapter->max_multicast_filters);
2761 break;
2762 case VLAN_HEADER_INSERTION:
2763 adapter->vlan_header_insertion =
Thomas Falconde89e852016-03-01 10:20:09 -06002764 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002765 if (adapter->vlan_header_insertion)
2766 netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
2767 netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
2768 adapter->vlan_header_insertion);
2769 break;
2770 case MAX_TX_SG_ENTRIES:
2771 adapter->max_tx_sg_entries =
Thomas Falconde89e852016-03-01 10:20:09 -06002772 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002773 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
2774 adapter->max_tx_sg_entries);
2775 break;
2776 case RX_SG_SUPPORTED:
2777 adapter->rx_sg_supported =
Thomas Falconde89e852016-03-01 10:20:09 -06002778 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002779 netdev_dbg(netdev, "rx_sg_supported = %lld\n",
2780 adapter->rx_sg_supported);
2781 break;
2782 case OPT_TX_COMP_SUB_QUEUES:
2783 adapter->opt_tx_comp_sub_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06002784 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002785 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
2786 adapter->opt_tx_comp_sub_queues);
2787 break;
2788 case OPT_RX_COMP_QUEUES:
2789 adapter->opt_rx_comp_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06002790 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002791 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
2792 adapter->opt_rx_comp_queues);
2793 break;
2794 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
2795 adapter->opt_rx_bufadd_q_per_rx_comp_q =
Thomas Falconde89e852016-03-01 10:20:09 -06002796 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002797 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
2798 adapter->opt_rx_bufadd_q_per_rx_comp_q);
2799 break;
2800 case OPT_TX_ENTRIES_PER_SUBCRQ:
2801 adapter->opt_tx_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06002802 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002803 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
2804 adapter->opt_tx_entries_per_subcrq);
2805 break;
2806 case OPT_RXBA_ENTRIES_PER_SUBCRQ:
2807 adapter->opt_rxba_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06002808 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002809 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
2810 adapter->opt_rxba_entries_per_subcrq);
2811 break;
2812 case TX_RX_DESC_REQ:
2813 adapter->tx_rx_desc_req = crq->query_capability.number;
2814 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
2815 adapter->tx_rx_desc_req);
2816 break;
2817
2818 default:
2819 netdev_err(netdev, "Got invalid cap rsp %d\n",
2820 crq->query_capability.capability);
2821 }
2822
2823out:
Thomas Falcon249168a2017-02-15 12:18:00 -06002824 if (atomic_read(&adapter->running_cap_crqs) == 0) {
2825 adapter->wait_capability = false;
Thomas Falconea22d512016-07-06 15:35:17 -05002826 init_sub_crqs(adapter, 0);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002827 /* We're done querying the capabilities, initialize sub-crqs */
Thomas Falcon249168a2017-02-15 12:18:00 -06002828 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06002829}
2830
Thomas Falcon032c5e82015-12-21 11:26:06 -06002831static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
2832{
Wei Yongjun96183182016-06-27 20:48:53 +08002833 struct ibmvnic_inflight_cmd *inflight_cmd, *tmp1;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002834 struct device *dev = &adapter->vdev->dev;
Wei Yongjun96183182016-06-27 20:48:53 +08002835 struct ibmvnic_error_buff *error_buff, *tmp2;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002836 unsigned long flags;
2837 unsigned long flags2;
2838
2839 spin_lock_irqsave(&adapter->inflight_lock, flags);
Wei Yongjun96183182016-06-27 20:48:53 +08002840 list_for_each_entry_safe(inflight_cmd, tmp1, &adapter->inflight, list) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06002841 switch (inflight_cmd->crq.generic.cmd) {
2842 case LOGIN:
2843 dma_unmap_single(dev, adapter->login_buf_token,
2844 adapter->login_buf_sz,
2845 DMA_BIDIRECTIONAL);
2846 dma_unmap_single(dev, adapter->login_rsp_buf_token,
2847 adapter->login_rsp_buf_sz,
2848 DMA_BIDIRECTIONAL);
2849 kfree(adapter->login_rsp_buf);
2850 kfree(adapter->login_buf);
2851 break;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002852 case REQUEST_ERROR_INFO:
2853 spin_lock_irqsave(&adapter->error_list_lock, flags2);
Wei Yongjun96183182016-06-27 20:48:53 +08002854 list_for_each_entry_safe(error_buff, tmp2,
2855 &adapter->errors, list) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06002856 dma_unmap_single(dev, error_buff->dma,
2857 error_buff->len,
2858 DMA_FROM_DEVICE);
2859 kfree(error_buff->buff);
2860 list_del(&error_buff->list);
2861 kfree(error_buff);
2862 }
2863 spin_unlock_irqrestore(&adapter->error_list_lock,
2864 flags2);
2865 break;
2866 }
2867 list_del(&inflight_cmd->list);
2868 kfree(inflight_cmd);
2869 }
2870 spin_unlock_irqrestore(&adapter->inflight_lock, flags);
2871}
2872
Thomas Falcon9888d7b2016-10-27 12:28:51 -05002873static void ibmvnic_xport_event(struct work_struct *work)
2874{
2875 struct ibmvnic_adapter *adapter = container_of(work,
2876 struct ibmvnic_adapter,
2877 ibmvnic_xport);
2878 struct device *dev = &adapter->vdev->dev;
2879 long rc;
2880
2881 ibmvnic_free_inflight(adapter);
2882 release_sub_crqs(adapter);
2883 if (adapter->migrated) {
2884 rc = ibmvnic_reenable_crq_queue(adapter);
2885 if (rc)
2886 dev_err(dev, "Error after enable rc=%ld\n", rc);
2887 adapter->migrated = false;
2888 rc = ibmvnic_send_crq_init(adapter);
2889 if (rc)
2890 dev_err(dev, "Error sending init rc=%ld\n", rc);
2891 }
2892}
2893
Thomas Falcon032c5e82015-12-21 11:26:06 -06002894static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
2895 struct ibmvnic_adapter *adapter)
2896{
2897 struct ibmvnic_generic_crq *gen_crq = &crq->generic;
2898 struct net_device *netdev = adapter->netdev;
2899 struct device *dev = &adapter->vdev->dev;
2900 long rc;
2901
2902 netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
2903 ((unsigned long int *)crq)[0],
2904 ((unsigned long int *)crq)[1]);
2905 switch (gen_crq->first) {
2906 case IBMVNIC_CRQ_INIT_RSP:
2907 switch (gen_crq->cmd) {
2908 case IBMVNIC_CRQ_INIT:
2909 dev_info(dev, "Partner initialized\n");
2910 /* Send back a response */
2911 rc = ibmvnic_send_crq_init_complete(adapter);
Thomas Falcon65dc6892016-07-06 15:35:18 -05002912 if (!rc)
2913 schedule_work(&adapter->vnic_crq_init);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002914 else
2915 dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
2916 break;
2917 case IBMVNIC_CRQ_INIT_COMPLETE:
2918 dev_info(dev, "Partner initialization complete\n");
2919 send_version_xchg(adapter);
2920 break;
2921 default:
2922 dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
2923 }
2924 return;
2925 case IBMVNIC_CRQ_XPORT_EVENT:
2926 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
2927 dev_info(dev, "Re-enabling adapter\n");
2928 adapter->migrated = true;
Thomas Falcon9888d7b2016-10-27 12:28:51 -05002929 schedule_work(&adapter->ibmvnic_xport);
Thomas Falcondfad09a2016-08-18 11:37:51 -05002930 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
2931 dev_info(dev, "Backing device failover detected\n");
2932 netif_carrier_off(netdev);
2933 adapter->failover = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002934 } else {
2935 /* The adapter lost the connection */
2936 dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
2937 gen_crq->cmd);
Thomas Falcon9888d7b2016-10-27 12:28:51 -05002938 schedule_work(&adapter->ibmvnic_xport);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002939 }
2940 return;
2941 case IBMVNIC_CRQ_CMD_RSP:
2942 break;
2943 default:
2944 dev_err(dev, "Got an invalid msg type 0x%02x\n",
2945 gen_crq->first);
2946 return;
2947 }
2948
2949 switch (gen_crq->cmd) {
2950 case VERSION_EXCHANGE_RSP:
2951 rc = crq->version_exchange_rsp.rc.code;
2952 if (rc) {
2953 dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
2954 break;
2955 }
2956 dev_info(dev, "Partner protocol version is %d\n",
2957 crq->version_exchange_rsp.version);
2958 if (be16_to_cpu(crq->version_exchange_rsp.version) <
2959 ibmvnic_version)
2960 ibmvnic_version =
2961 be16_to_cpu(crq->version_exchange_rsp.version);
2962 send_cap_queries(adapter);
2963 break;
2964 case QUERY_CAPABILITY_RSP:
2965 handle_query_cap_rsp(crq, adapter);
2966 break;
2967 case QUERY_MAP_RSP:
2968 handle_query_map_rsp(crq, adapter);
2969 break;
2970 case REQUEST_MAP_RSP:
2971 handle_request_map_rsp(crq, adapter);
2972 break;
2973 case REQUEST_UNMAP_RSP:
2974 handle_request_unmap_rsp(crq, adapter);
2975 break;
2976 case REQUEST_CAPABILITY_RSP:
2977 handle_request_cap_rsp(crq, adapter);
2978 break;
2979 case LOGIN_RSP:
2980 netdev_dbg(netdev, "Got Login Response\n");
2981 handle_login_rsp(crq, adapter);
2982 break;
2983 case LOGICAL_LINK_STATE_RSP:
2984 netdev_dbg(netdev, "Got Logical Link State Response\n");
2985 adapter->logical_link_state =
2986 crq->logical_link_state_rsp.link_state;
2987 break;
2988 case LINK_STATE_INDICATION:
2989 netdev_dbg(netdev, "Got Logical Link State Indication\n");
2990 adapter->phys_link_state =
2991 crq->link_state_indication.phys_link_state;
2992 adapter->logical_link_state =
2993 crq->link_state_indication.logical_link_state;
2994 break;
2995 case CHANGE_MAC_ADDR_RSP:
2996 netdev_dbg(netdev, "Got MAC address change Response\n");
2997 handle_change_mac_rsp(crq, adapter);
2998 break;
2999 case ERROR_INDICATION:
3000 netdev_dbg(netdev, "Got Error Indication\n");
3001 handle_error_indication(crq, adapter);
3002 break;
3003 case REQUEST_ERROR_RSP:
3004 netdev_dbg(netdev, "Got Error Detail Response\n");
3005 handle_error_info_rsp(crq, adapter);
3006 break;
3007 case REQUEST_STATISTICS_RSP:
3008 netdev_dbg(netdev, "Got Statistics Response\n");
3009 complete(&adapter->stats_done);
3010 break;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003011 case QUERY_IP_OFFLOAD_RSP:
3012 netdev_dbg(netdev, "Got Query IP offload Response\n");
3013 handle_query_ip_offload_rsp(adapter);
3014 break;
3015 case MULTICAST_CTRL_RSP:
3016 netdev_dbg(netdev, "Got multicast control Response\n");
3017 break;
3018 case CONTROL_IP_OFFLOAD_RSP:
3019 netdev_dbg(netdev, "Got Control IP offload Response\n");
3020 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
3021 sizeof(adapter->ip_offload_ctrl),
3022 DMA_TO_DEVICE);
John Allenbd0b6722017-03-17 17:13:40 -05003023 complete(&adapter->init_done);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003024 break;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003025 case COLLECT_FW_TRACE_RSP:
3026 netdev_dbg(netdev, "Got Collect firmware trace Response\n");
3027 complete(&adapter->fw_done);
3028 break;
3029 default:
3030 netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
3031 gen_crq->cmd);
3032 }
3033}
3034
3035static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
3036{
3037 struct ibmvnic_adapter *adapter = instance;
Thomas Falcon6c267b32017-02-15 12:17:58 -06003038 unsigned long flags;
3039
3040 spin_lock_irqsave(&adapter->crq.lock, flags);
3041 vio_disable_interrupts(adapter->vdev);
3042 tasklet_schedule(&adapter->tasklet);
3043 spin_unlock_irqrestore(&adapter->crq.lock, flags);
3044 return IRQ_HANDLED;
3045}
3046
3047static void ibmvnic_tasklet(void *data)
3048{
3049 struct ibmvnic_adapter *adapter = data;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003050 struct ibmvnic_crq_queue *queue = &adapter->crq;
3051 struct vio_dev *vdev = adapter->vdev;
3052 union ibmvnic_crq *crq;
3053 unsigned long flags;
3054 bool done = false;
3055
3056 spin_lock_irqsave(&queue->lock, flags);
3057 vio_disable_interrupts(vdev);
3058 while (!done) {
3059 /* Pull all the valid messages off the CRQ */
3060 while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
3061 ibmvnic_handle_crq(crq, adapter);
3062 crq->generic.first = 0;
3063 }
3064 vio_enable_interrupts(vdev);
3065 crq = ibmvnic_next_crq(adapter);
3066 if (crq) {
3067 vio_disable_interrupts(vdev);
3068 ibmvnic_handle_crq(crq, adapter);
3069 crq->generic.first = 0;
3070 } else {
Thomas Falcon249168a2017-02-15 12:18:00 -06003071 /* remain in tasklet until all
3072 * capabilities responses are received
3073 */
3074 if (!adapter->wait_capability)
3075 done = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003076 }
3077 }
Thomas Falcon249168a2017-02-15 12:18:00 -06003078 /* if capabilities CRQ's were sent in this tasklet, the following
3079 * tasklet must wait until all responses are received
3080 */
3081 if (atomic_read(&adapter->running_cap_crqs) != 0)
3082 adapter->wait_capability = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003083 spin_unlock_irqrestore(&queue->lock, flags);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003084}
3085
3086static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
3087{
3088 struct vio_dev *vdev = adapter->vdev;
3089 int rc;
3090
3091 do {
3092 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
3093 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
3094
3095 if (rc)
3096 dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
3097
3098 return rc;
3099}
3100
3101static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
3102{
3103 struct ibmvnic_crq_queue *crq = &adapter->crq;
3104 struct device *dev = &adapter->vdev->dev;
3105 struct vio_dev *vdev = adapter->vdev;
3106 int rc;
3107
3108 /* Close the CRQ */
3109 do {
3110 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3111 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3112
3113 /* Clean out the queue */
3114 memset(crq->msgs, 0, PAGE_SIZE);
3115 crq->cur = 0;
3116
3117 /* And re-open it again */
3118 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
3119 crq->msg_token, PAGE_SIZE);
3120
3121 if (rc == H_CLOSED)
3122 /* Adapter is good, but other end is not ready */
3123 dev_warn(dev, "Partner adapter not ready\n");
3124 else if (rc != 0)
3125 dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
3126
3127 return rc;
3128}
3129
Nathan Fontenotf9928872017-03-30 02:48:54 -04003130static void release_crq_queue(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06003131{
3132 struct ibmvnic_crq_queue *crq = &adapter->crq;
3133 struct vio_dev *vdev = adapter->vdev;
3134 long rc;
3135
Nathan Fontenotf9928872017-03-30 02:48:54 -04003136 if (!crq->msgs)
3137 return;
3138
Thomas Falcon032c5e82015-12-21 11:26:06 -06003139 netdev_dbg(adapter->netdev, "Releasing CRQ\n");
3140 free_irq(vdev->irq, adapter);
Thomas Falcon6c267b32017-02-15 12:17:58 -06003141 tasklet_kill(&adapter->tasklet);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003142 do {
3143 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3144 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3145
3146 dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
3147 DMA_BIDIRECTIONAL);
3148 free_page((unsigned long)crq->msgs);
Nathan Fontenotf9928872017-03-30 02:48:54 -04003149 crq->msgs = NULL;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003150}
3151
Nathan Fontenotf9928872017-03-30 02:48:54 -04003152static int init_crq_queue(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06003153{
3154 struct ibmvnic_crq_queue *crq = &adapter->crq;
3155 struct device *dev = &adapter->vdev->dev;
3156 struct vio_dev *vdev = adapter->vdev;
3157 int rc, retrc = -ENOMEM;
3158
Nathan Fontenotf9928872017-03-30 02:48:54 -04003159 if (crq->msgs)
3160 return 0;
3161
Thomas Falcon032c5e82015-12-21 11:26:06 -06003162 crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
3163 /* Should we allocate more than one page? */
3164
3165 if (!crq->msgs)
3166 return -ENOMEM;
3167
3168 crq->size = PAGE_SIZE / sizeof(*crq->msgs);
3169 crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
3170 DMA_BIDIRECTIONAL);
3171 if (dma_mapping_error(dev, crq->msg_token))
3172 goto map_failed;
3173
3174 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
3175 crq->msg_token, PAGE_SIZE);
3176
3177 if (rc == H_RESOURCE)
3178 /* maybe kexecing and resource is busy. try a reset */
3179 rc = ibmvnic_reset_crq(adapter);
3180 retrc = rc;
3181
3182 if (rc == H_CLOSED) {
3183 dev_warn(dev, "Partner adapter not ready\n");
3184 } else if (rc) {
3185 dev_warn(dev, "Error %d opening adapter\n", rc);
3186 goto reg_crq_failed;
3187 }
3188
3189 retrc = 0;
3190
Thomas Falcon6c267b32017-02-15 12:17:58 -06003191 tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
3192 (unsigned long)adapter);
3193
Thomas Falcon032c5e82015-12-21 11:26:06 -06003194 netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
3195 rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
3196 adapter);
3197 if (rc) {
3198 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
3199 vdev->irq, rc);
3200 goto req_irq_failed;
3201 }
3202
3203 rc = vio_enable_interrupts(vdev);
3204 if (rc) {
3205 dev_err(dev, "Error %d enabling interrupts\n", rc);
3206 goto req_irq_failed;
3207 }
3208
3209 crq->cur = 0;
3210 spin_lock_init(&crq->lock);
3211
3212 return retrc;
3213
3214req_irq_failed:
Thomas Falcon6c267b32017-02-15 12:17:58 -06003215 tasklet_kill(&adapter->tasklet);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003216 do {
3217 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3218 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3219reg_crq_failed:
3220 dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
3221map_failed:
3222 free_page((unsigned long)crq->msgs);
Nathan Fontenotf9928872017-03-30 02:48:54 -04003223 crq->msgs = NULL;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003224 return retrc;
3225}
3226
Thomas Falcon65dc6892016-07-06 15:35:18 -05003227static void handle_crq_init_rsp(struct work_struct *work)
3228{
3229 struct ibmvnic_adapter *adapter = container_of(work,
3230 struct ibmvnic_adapter,
3231 vnic_crq_init);
3232 struct device *dev = &adapter->vdev->dev;
3233 struct net_device *netdev = adapter->netdev;
3234 unsigned long timeout = msecs_to_jiffies(30000);
Thomas Falcondfad09a2016-08-18 11:37:51 -05003235 bool restart = false;
Thomas Falcon65dc6892016-07-06 15:35:18 -05003236 int rc;
3237
Thomas Falcondfad09a2016-08-18 11:37:51 -05003238 if (adapter->failover) {
3239 release_sub_crqs(adapter);
3240 if (netif_running(netdev)) {
3241 netif_tx_disable(netdev);
3242 ibmvnic_close(netdev);
3243 restart = true;
3244 }
3245 }
3246
Thomas Falcon65dc6892016-07-06 15:35:18 -05003247 reinit_completion(&adapter->init_done);
Nathan Fontenotdb5d0b52017-02-10 13:45:05 -05003248 send_version_xchg(adapter);
Thomas Falcon65dc6892016-07-06 15:35:18 -05003249 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
3250 dev_err(dev, "Passive init timeout\n");
3251 goto task_failed;
3252 }
3253
Thomas Falconf39f0d12017-02-14 10:22:59 -06003254 netdev->mtu = adapter->req_mtu - ETH_HLEN;
Thomas Falcon65dc6892016-07-06 15:35:18 -05003255
Thomas Falcondfad09a2016-08-18 11:37:51 -05003256 if (adapter->failover) {
3257 adapter->failover = false;
3258 if (restart) {
3259 rc = ibmvnic_open(netdev);
3260 if (rc)
3261 goto restart_failed;
3262 }
3263 netif_carrier_on(netdev);
3264 return;
3265 }
3266
Thomas Falcon65dc6892016-07-06 15:35:18 -05003267 rc = register_netdev(netdev);
3268 if (rc) {
3269 dev_err(dev,
3270 "failed to register netdev rc=%d\n", rc);
3271 goto register_failed;
3272 }
3273 dev_info(dev, "ibmvnic registered\n");
3274
3275 return;
3276
Thomas Falcondfad09a2016-08-18 11:37:51 -05003277restart_failed:
3278 dev_err(dev, "Failed to restart ibmvnic, rc=%d\n", rc);
Thomas Falcon65dc6892016-07-06 15:35:18 -05003279register_failed:
3280 release_sub_crqs(adapter);
3281task_failed:
3282 dev_err(dev, "Passive initialization was not successful\n");
3283}
3284
John Allenf6ef6402017-03-17 17:13:42 -05003285static int ibmvnic_init(struct ibmvnic_adapter *adapter)
3286{
3287 struct device *dev = &adapter->vdev->dev;
3288 unsigned long timeout = msecs_to_jiffies(30000);
John Allenf6ef6402017-03-17 17:13:42 -05003289 int rc;
3290
Nathan Fontenotf9928872017-03-30 02:48:54 -04003291 rc = init_crq_queue(adapter);
John Allenf6ef6402017-03-17 17:13:42 -05003292 if (rc) {
3293 dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
3294 return rc;
3295 }
3296
Nathan Fontenot7bbc27a2017-03-30 02:49:23 -04003297 rc = init_stats_token(adapter);
3298 if (rc) {
Nathan Fontenotf9928872017-03-30 02:48:54 -04003299 release_crq_queue(adapter);
Nathan Fontenot7bbc27a2017-03-30 02:49:23 -04003300 return rc;
John Allenf6ef6402017-03-17 17:13:42 -05003301 }
3302
John Allenf6ef6402017-03-17 17:13:42 -05003303 init_completion(&adapter->init_done);
3304 ibmvnic_send_crq_init(adapter);
3305 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
3306 dev_err(dev, "Initialization sequence timed out\n");
Nathan Fontenotf9928872017-03-30 02:48:54 -04003307 release_crq_queue(adapter);
John Allenf6ef6402017-03-17 17:13:42 -05003308 return -1;
3309 }
3310
3311 return 0;
3312}
3313
Thomas Falcon032c5e82015-12-21 11:26:06 -06003314static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
3315{
3316 struct ibmvnic_adapter *adapter;
3317 struct net_device *netdev;
3318 unsigned char *mac_addr_p;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003319 int rc;
3320
3321 dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
3322 dev->unit_address);
3323
3324 mac_addr_p = (unsigned char *)vio_get_attribute(dev,
3325 VETH_MAC_ADDR, NULL);
3326 if (!mac_addr_p) {
3327 dev_err(&dev->dev,
3328 "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
3329 __FILE__, __LINE__);
3330 return 0;
3331 }
3332
3333 netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
3334 IBMVNIC_MAX_TX_QUEUES);
3335 if (!netdev)
3336 return -ENOMEM;
3337
3338 adapter = netdev_priv(netdev);
3339 dev_set_drvdata(&dev->dev, netdev);
3340 adapter->vdev = dev;
3341 adapter->netdev = netdev;
Thomas Falcondfad09a2016-08-18 11:37:51 -05003342 adapter->failover = false;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003343
3344 ether_addr_copy(adapter->mac_addr, mac_addr_p);
3345 ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
3346 netdev->irq = dev->irq;
3347 netdev->netdev_ops = &ibmvnic_netdev_ops;
3348 netdev->ethtool_ops = &ibmvnic_ethtool_ops;
3349 SET_NETDEV_DEV(netdev, &dev->dev);
3350
Thomas Falcon65dc6892016-07-06 15:35:18 -05003351 INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);
Thomas Falcon9888d7b2016-10-27 12:28:51 -05003352 INIT_WORK(&adapter->ibmvnic_xport, ibmvnic_xport_event);
Thomas Falcon65dc6892016-07-06 15:35:18 -05003353
Thomas Falcon032c5e82015-12-21 11:26:06 -06003354 spin_lock_init(&adapter->stats_lock);
3355
Thomas Falcon032c5e82015-12-21 11:26:06 -06003356 INIT_LIST_HEAD(&adapter->errors);
3357 INIT_LIST_HEAD(&adapter->inflight);
3358 spin_lock_init(&adapter->error_list_lock);
3359 spin_lock_init(&adapter->inflight_lock);
3360
John Allenf6ef6402017-03-17 17:13:42 -05003361 rc = ibmvnic_init(adapter);
3362 if (rc) {
3363 free_netdev(netdev);
3364 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003365 }
3366
Thomas Falconf39f0d12017-02-14 10:22:59 -06003367 netdev->mtu = adapter->req_mtu - ETH_HLEN;
John Allenea5509f2017-03-17 17:13:43 -05003368 adapter->is_closed = false;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003369
3370 rc = register_netdev(netdev);
3371 if (rc) {
3372 dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
John Allenf6ef6402017-03-17 17:13:42 -05003373 free_netdev(netdev);
3374 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003375 }
3376 dev_info(&dev->dev, "ibmvnic registered\n");
3377
3378 return 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003379}
3380
3381static int ibmvnic_remove(struct vio_dev *dev)
3382{
3383 struct net_device *netdev = dev_get_drvdata(&dev->dev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003384
3385 unregister_netdev(netdev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003386 free_netdev(netdev);
3387 dev_set_drvdata(&dev->dev, NULL);
3388
3389 return 0;
3390}
3391
3392static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
3393{
3394 struct net_device *netdev = dev_get_drvdata(&vdev->dev);
3395 struct ibmvnic_adapter *adapter;
3396 struct iommu_table *tbl;
3397 unsigned long ret = 0;
3398 int i;
3399
3400 tbl = get_iommu_table_base(&vdev->dev);
3401
3402 /* netdev inits at probe time along with the structures we need below*/
3403 if (!netdev)
3404 return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);
3405
3406 adapter = netdev_priv(netdev);
3407
3408 ret += PAGE_SIZE; /* the crq message queue */
3409 ret += adapter->bounce_buffer_size;
3410 ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
3411
3412 for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
3413 ret += 4 * PAGE_SIZE; /* the scrq message queue */
3414
3415 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
3416 i++)
3417 ret += adapter->rx_pool[i].size *
3418 IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
3419
3420 return ret;
3421}
3422
3423static int ibmvnic_resume(struct device *dev)
3424{
3425 struct net_device *netdev = dev_get_drvdata(dev);
3426 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3427 int i;
3428
3429 /* kick the interrupt handlers just in case we lost an interrupt */
3430 for (i = 0; i < adapter->req_rx_queues; i++)
3431 ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
3432 adapter->rx_scrq[i]);
3433
3434 return 0;
3435}
3436
3437static struct vio_device_id ibmvnic_device_table[] = {
3438 {"network", "IBM,vnic"},
3439 {"", "" }
3440};
3441MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
3442
3443static const struct dev_pm_ops ibmvnic_pm_ops = {
3444 .resume = ibmvnic_resume
3445};
3446
3447static struct vio_driver ibmvnic_driver = {
3448 .id_table = ibmvnic_device_table,
3449 .probe = ibmvnic_probe,
3450 .remove = ibmvnic_remove,
3451 .get_desired_dma = ibmvnic_get_desired_dma,
3452 .name = ibmvnic_driver_name,
3453 .pm = &ibmvnic_pm_ops,
3454};
3455
3456/* module functions */
3457static int __init ibmvnic_module_init(void)
3458{
3459 pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
3460 IBMVNIC_DRIVER_VERSION);
3461
3462 return vio_register_driver(&ibmvnic_driver);
3463}
3464
3465static void __exit ibmvnic_module_exit(void)
3466{
3467 vio_unregister_driver(&ibmvnic_driver);
3468}
3469
3470module_init(ibmvnic_module_init);
3471module_exit(ibmvnic_module_exit);