/**************************************************************************/
/*                                                                        */
/*  IBM System i and System p Virtual NIC Device Driver                   */
/*  Copyright (C) 2014 IBM Corp.                                          */
/*  Santiago Leon (santi_leon@yahoo.com)                                  */
/*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
/*  John Allen (jallen@linux.vnet.ibm.com)                                */
/*                                                                        */
/*  This program is free software; you can redistribute it and/or modify  */
/*  it under the terms of the GNU General Public License as published by  */
/*  the Free Software Foundation; either version 2 of the License, or     */
/*  (at your option) any later version.                                   */
/*                                                                        */
/*  This program is distributed in the hope that it will be useful,       */
/*  but WITHOUT ANY WARRANTY; without even the implied warranty of        */
/*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the          */
/*  GNU General Public License for more details.                          */
/*                                                                        */
/*  You should have received a copy of the GNU General Public License     */
/*  along with this program.                                              */
/*                                                                        */
/*  This module contains the implementation of a virtual Ethernet device  */
/*  for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN   */
/*  option of the RS/6000 Platform Architecture to interface with virtual */
/*  Ethernet NICs that are presented to the partition by the hypervisor.  */
/*                                                                        */
/*  Messages are passed between the VNIC driver and the VNIC server using */
/*  Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
/*  issue and receive commands that initiate communication with the       */
/*  server on driver initialization. Sub CRQs (sCRQs) are similar to      */
/*  CRQs, but are used by the driver to notify the server that a packet   */
/*  is ready for transmission or that a buffer has been added to receive  */
/*  a packet. Subsequently, sCRQs are used by the server to notify the    */
/*  driver that a packet transmission has been completed or that a packet */
/*  has been received and placed in a waiting buffer.                     */
/*                                                                        */
/*  In lieu of a more conventional "on-the-fly" DMA mapping strategy in   */
/*  which skbs are DMA mapped and immediately unmapped when the transmit  */
/*  or receive has been completed, the VNIC driver is required to use     */
/*  "long term mapping". This entails that large, contiguous DMA mapped   */
/*  buffers are allocated on driver initialization and these buffers are  */
/*  then continuously reused to pass skbs to and from the VNIC server.    */
/*                                                                        */
/**************************************************************************/
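
/* A minimal sketch (illustrative only, not part of the driver) of the
 * long-term mapping scheme described above; the helper names match the
 * functions defined later in this file:
 *
 *	struct ibmvnic_long_term_buff ltb;
 *
 *	alloc_long_term_buff(adapter, &ltb, pool_size * buff_size);
 *					// once, at init: allocate, DMA-map,
 *					// and register via REQUEST_MAP
 *	dst = ltb.buff + index * buff_size;
 *					// per packet: just an offset into
 *					// the already-mapped region, so no
 *					// dma_map/dma_unmap in the hot path
 */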

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_map_query(struct ibmvnic_adapter *adapter);
static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static void send_request_unmap(struct ibmvnic_adapter *, u8);
static void send_login(struct ibmvnic_adapter *adapter);
static void send_cap_queries(struct ibmvnic_adapter *adapter);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
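
/* Example (illustrative): the two macros above compose to read a single
 * counter out of the DMA-mapped statistics block, e.g.
 *
 *	u64 rx = IBMVNIC_GET_STAT(adapter, IBMVNIC_STAT_OFF(rx_packets));
 *
 * which evaluates to adapter->stats.rx_packets read as a u64.
 */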

static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);

	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	init_completion(&adapter->fw_done);
	send_request_map(adapter, ltb->addr,
			 ltb->size, ltb->map_id);
	wait_for_completion(&adapter->fw_done);
	return 0;
}
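
/* Note: the fw_done completion above is presumed to be completed from the
 * CRQ response path once the REQUEST_MAP response arrives (handled elsewhere
 * in this file); descriptors later refer to this buffer by ltb->map_id.
 */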

static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	if (!adapter->failover)
		send_request_unmap(adapter, ltb->map_id);
	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}

static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	struct device *dev = &adapter->vdev->dev;
	int buffers_added = 0;
	unsigned long lpar_rc;
	union sub_crq sub_crq;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	u64 *handle_array;
	int shift = 0;
	int index;
	int i;

	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_rxadd_subcrqs));

	for (i = 0; i < count; ++i) {
		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		memset(&sub_crq, 0, sizeof(sub_crq));
		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq.rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
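		/* Worked example (assuming a 4096-byte buffer): 0x1000 << 8 =
		 * 0x100000; after cpu_to_be32(), the three high-order bytes
		 * 0x00 0x10 0x00 carry the 24-bit length 4096.
		 */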
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);

		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
				      &sub_crq);
		if (lpar_rc != H_SUCCESS)
			goto failure;

		buffers_added++;
		adapter->replenish_add_buff_success++;
		pool->next_free = (pool->next_free + 1) % pool->size;
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	dev_info(dev, "replenish pools failure\n");
	pool->free_map[pool->next_free] = index;
	pool->rx_buff[index].skb = NULL;
	if (!dma_mapping_error(dev, dma_addr))
		dma_unmap_single(dev, dma_addr, pool->buff_size,
				 DMA_FROM_DEVICE);

	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;
	atomic_add(buffers_added, &pool->available);
}

static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->migrated)
		return;

	adapter->replenish_task_cycles++;
	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, stok)) {
		dev_err(dev, "Couldn't map stats buffer\n");
		return -1;
	}

	adapter->stats_token = stok;
	return 0;
}

static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int rx_scrqs;
	int i, j;

	if (!adapter->rx_pool)
		return;

	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		kfree(rx_pool->free_map);
		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
}

static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	int rxadd_subcrqs;
	u64 *size_array;
	int i, j;

	rxadd_subcrqs =
		be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	adapter->rx_pool = kcalloc(rxadd_subcrqs,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -1;
	}

	for (i = 0; i < rxadd_subcrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool %d, %lld buffs, %lld bytes each\n",
			   i, adapter->req_rx_add_entries_per_subcrq,
			   be64_to_cpu(size_array[i]));

		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
		rx_pool->index = i;
		rx_pool->buff_size = be64_to_cpu(size_array[i]);
		rx_pool->active = 1;

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			release_rx_pools(adapter);
			return -1;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			release_rx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					 rx_pool->size * rx_pool->buff_size)) {
			release_rx_pools(adapter);
			return -1;
		}

		for (j = 0; j < rx_pool->size; ++j)
			rx_pool->free_map[j] = j;

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}

static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_tx_pool *tx_pool;
	int i, tx_scrqs;

	if (!adapter->tx_pool)
		return;

	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	for (i = 0; i < tx_scrqs; i++) {
		tx_pool = &adapter->tx_pool[i];
		kfree(tx_pool->tx_buff);
		free_long_term_buff(adapter, &tx_pool->long_term_buff);
		kfree(tx_pool->free_map);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
}

static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_pool *tx_pool;
	int tx_subcrqs;
	int i, j;

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	adapter->tx_pool = kcalloc(tx_subcrqs,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -1;

	for (i = 0; i < tx_subcrqs; i++) {
		tx_pool = &adapter->tx_pool[i];
		tx_pool->tx_buff = kcalloc(adapter->req_tx_entries_per_subcrq,
					   sizeof(struct ibmvnic_tx_buff),
					   GFP_KERNEL);
		if (!tx_pool->tx_buff) {
			dev_err(dev, "tx pool buffer allocation failed\n");
			release_tx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
					 adapter->req_tx_entries_per_subcrq *
					 adapter->req_mtu)) {
			release_tx_pools(adapter);
			return -1;
		}

		tx_pool->free_map = kcalloc(adapter->req_tx_entries_per_subcrq,
					    sizeof(int), GFP_KERNEL);
		if (!tx_pool->free_map) {
			release_tx_pools(adapter);
			return -1;
		}

		for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
			tx_pool->free_map[j] = j;

		tx_pool->consumer_index = 0;
		tx_pool->producer_index = 0;
	}

	return 0;
}

static void release_bounce_buffer(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->bounce_buffer)
		return;

	if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
		dma_unmap_single(dev, adapter->bounce_buffer_dma,
				 adapter->bounce_buffer_size,
				 DMA_BIDIRECTIONAL);
		adapter->bounce_buffer_dma = DMA_ERROR_CODE;
	}

	kfree(adapter->bounce_buffer);
	adapter->bounce_buffer = NULL;
}

static int init_bounce_buffer(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	char *buf;
	int buf_sz;
	dma_addr_t map_addr;

	buf_sz = (netdev->mtu + ETH_HLEN - 1) / PAGE_SIZE + 1;
	buf = kmalloc(buf_sz, GFP_KERNEL);
	if (!buf)
		return -1;

	map_addr = dma_map_single(dev, buf, buf_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, map_addr)) {
		dev_err(dev, "Couldn't map bounce buffer\n");
		kfree(buf);
		return -1;
	}

	adapter->bounce_buffer = buf;
	adapter->bounce_buffer_size = buf_sz;
	adapter->bounce_buffer_dma = map_addr;
	return 0;
}

static void release_error_buffers(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list) {
		list_del(&error_buff->list);
		dma_unmap_single(dev, error_buff->dma, error_buff->len,
				 DMA_FROM_DEVICE);
		kfree(error_buff->buff);
		kfree(error_buff);
	}
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);
}

static int ibmvnic_login(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(30000);
	struct device *dev = &adapter->vdev->dev;

	do {
		if (adapter->renegotiate) {
			adapter->renegotiate = false;
			release_sub_crqs(adapter);

			reinit_completion(&adapter->init_done);
			send_cap_queries(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				dev_err(dev, "Capabilities query timeout\n");
				return -1;
			}
		}

		reinit_completion(&adapter->init_done);
		send_login(adapter);
		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			dev_err(dev, "Login timeout\n");
			return -1;
		}
	} while (adapter->renegotiate);

	return 0;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
	release_bounce_buffer(adapter);
	release_tx_pools(adapter);
	release_rx_pools(adapter);

	release_stats_token(adapter);
	release_error_buffers(adapter);
}

static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int rc = 0;
	int i;

	if (adapter->is_closed) {
		rc = ibmvnic_init(adapter);
		if (rc)
			return rc;
	}

	rc = ibmvnic_login(netdev);
	if (rc)
		return rc;

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		dev_err(dev, "failed to set the number of tx queues\n");
		return -1;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "failed to initialize sub crq irqs\n");
		return -1;
	}

	adapter->map_id = 1;
	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		goto ibmvnic_open_fail;
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&adapter->napi[i]);
	}

	send_map_query(adapter);

	rc = init_rx_pools(netdev);
	if (rc)
		goto ibmvnic_open_fail;

	rc = init_tx_pools(netdev);
	if (rc)
		goto ibmvnic_open_fail;

	rc = init_bounce_buffer(netdev);
	if (rc)
		goto ibmvnic_open_fail;

	replenish_pools(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++)
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);

	for (i = 0; i < adapter->req_tx_queues; i++)
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
	ibmvnic_send_crq(adapter, &crq);

	netif_tx_start_all_queues(netdev);
	adapter->is_closed = false;

	return 0;

ibmvnic_open_fail:
	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_disable(&adapter->napi[i]);
	release_resources(adapter);
	return -ENOMEM;
}

static void disable_sub_crqs(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i])
				disable_irq(adapter->tx_scrq[i]->irq);
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			if (adapter->rx_scrq[i])
				disable_irq(adapter->rx_scrq[i]->irq);
	}
}

static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	union ibmvnic_crq crq;
	int i;

	adapter->closing = true;
	disable_sub_crqs(adapter);

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_disable(&adapter->napi[i]);

	if (!adapter->failover)
		netif_tx_stop_all_queues(netdev);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_DN;
	ibmvnic_send_crq(adapter, &crq);

	release_resources(adapter);

	adapter->is_closed = true;
	adapter->closing = false;
	return 0;
}

/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field - bitfield determining needed headers
 * @skb - socket buffer
 * @hdr_len - array of header lengths
 * @hdr_data - buffer into which the headers are written
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers. Saves individual header
 * lengths in hdr_len and returns the total length of the header data.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	}

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}
	return len;
}
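
/* As read by the helpers above and below, hdr_field uses one bit per layer:
 * bit 7 gates header descriptors entirely (checked in ibmvnic_xmit), bit 6
 * requests the L2 header, bit 5 the L3 header, and bit 4 the L4 header;
 * e.g. hdr_field = 0xF0 asks for all three parsed headers.
 */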

/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field - bitfield determining needed headers
 * @hdr_data - buffer containing header data
 * @len - length of data buffer
 * @hdr_len - array of individual header lengths
 * @scrq_arr - descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */

static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			     union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	u8 *data, *cur;
	int tmp;

	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
	}
}

/**
 * build_hdr_descs_arr - build a header descriptor array
 * @txbuff - tx buffer containing the skb and the descriptor array
 * @num_entries - number of descriptors to be sent, updated in place
 * @hdr_field - bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */

static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	int tot_len, len;
	u8 *hdr_data = txbuff->hdr_data;

	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
				 txbuff->hdr_data);
	len = tot_len;
	len -= 24;
	if (len > 0)
		*num_entries += len % 29 ? len / 29 + 1 : len / 29;
	create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
			 txbuff->indir_arr + 1);
}
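
/* Worked example for the arithmetic above: a TCP/IPv4 frame has tot_len =
 * 14 (ETH) + 20 (IP) + 20 (TCP) = 54 header bytes. The first 24 ride in the
 * base header descriptor, leaving len = 30; 30 % 29 != 0, so *num_entries
 * grows by 30 / 29 + 1 = 2, matching the two extension descriptors that
 * create_hdr_descs() emits for the remaining bytes.
 */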

static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_sub_crq_queue *tx_scrq;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	unsigned int tx_map_failed = 0;
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	bool used_bounce = false;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	u64 *handle_array;
	int index = 0;
	int ret = 0;

	tx_pool = &adapter->tx_pool[queue_num];
	tx_scrq = adapter->tx_scrq[queue_num];
	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_txsubm_subcrqs));
	if (adapter->migrated) {
		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	index = tx_pool->free_map[tx_pool->consumer_index];
	offset = index * adapter->req_mtu;
	dst = tx_pool->long_term_buff.buff + offset;
	memset(dst, 0, adapter->req_mtu);
	skb_copy_from_linear_data(skb, dst, skb->len);
	data_dma_addr = tx_pool->long_term_buff.addr + offset;

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) %
		adapter->req_tx_entries_per_subcrq;

	tx_buff = &tx_pool->tx_buff[index];
	tx_buff->skb = skb;
	tx_buff->data_dma[0] = data_dma_addr;
	tx_buff->data_len[0] = skb->len;
	tx_buff->index = index;
	tx_buff->pool_index = queue_num;
	tx_buff->last_frag = true;
	tx_buff->used_bounce = used_bounce;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.n_sge = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
	tx_crq.v1.correlator = cpu_to_be32(index);
	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		if (ip_hdr(skb)->version == 4)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		else if (ip_hdr(skb)->version == 6)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;

		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
	}
	/* determine if l2/3/4 headers are sent to firmware */
	if ((*hdrs >> 7) & 1 &&
	    (skb->protocol == htons(ETH_P_IP) ||
	     skb->protocol == htons(ETH_P_IPV6))) {
		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
		tx_crq.v1.n_crq_elem = num_entries;
		tx_buff->indir_arr[0] = tx_crq;
		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
						    sizeof(tx_buff->indir_arr),
						    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "tx: unable to map descriptor array\n");
			tx_map_failed++;
			tx_dropped++;
			ret = NETDEV_TX_BUSY;
			goto out;
		}
		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
					       (u64)tx_buff->indir_dma,
					       (u64)num_entries);
	} else {
		lpar_rc = send_subcrq(adapter, handle_array[queue_num],
				      &tx_crq);
	}
	if (lpar_rc != H_SUCCESS) {
		dev_err(dev, "tx failed with code %ld\n", lpar_rc);

		if (tx_pool->consumer_index == 0)
			tx_pool->consumer_index =
				adapter->req_tx_entries_per_subcrq - 1;
		else
			tx_pool->consumer_index--;

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	if (atomic_inc_return(&tx_scrq->used)
	    >= adapter->req_tx_entries_per_subcrq) {
		netdev_info(netdev, "Stopping queue %d\n", queue_num);
		netif_stop_subqueue(netdev, queue_num);
	}

	tx_packets++;
	tx_bytes += skb->len;
	txq->trans_start = jiffies;
	ret = NETDEV_TX_OK;

out:
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;

	return ret;
}

static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)
			return;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else if (netdev_mc_empty(netdev)) {
			/* Reject all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else {
			/* Accept one or more multicast(s) */
			netdev_for_each_mc_addr(ha, netdev) {
				memset(&crq, 0, sizeof(crq));
				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}

static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	union ibmvnic_crq crq;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
	ibmvnic_send_crq(adapter, &crq);
	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
	return 0;
}

static void ibmvnic_tx_timeout(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int rc;

	/* Adapter timed out, resetting it */
	release_sub_crqs(adapter);
	rc = ibmvnic_reset_crq(adapter);
	if (rc)
		dev_err(&adapter->vdev->dev, "Adapter timeout, reset failed\n");
	else
		ibmvnic_send_crq_init(adapter);
}

static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_rx_buff *rx_buff)
{
	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];

	rx_buff->skb = NULL;

	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
	pool->next_alloc = (pool->next_alloc + 1) % pool->size;

	atomic_dec(&pool->available);
}

static int ibmvnic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int scrq_num = (int)(napi - adapter->napi);
	int frames_processed = 0;
restart_poll:
	while (frames_processed < budget) {
		struct sk_buff *skb;
		struct ibmvnic_rx_buff *rx_buff;
		union sub_crq *next;
		u32 length;
		u16 offset;
		u8 flags = 0;

		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
			break;
		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
		rx_buff =
		    (struct ibmvnic_rx_buff *)be64_to_cpu(next->
							  rx_comp.correlator);
		/* do error checking */
		if (next->rx_comp.rc) {
			netdev_err(netdev, "rx error %x\n", next->rx_comp.rc);
			/* free the entry */
			next->rx_comp.first = 0;
			remove_buff_from_pool(adapter, rx_buff);
			break;
		}

		length = be32_to_cpu(next->rx_comp.len);
		offset = be16_to_cpu(next->rx_comp.off_frame_data);
		flags = next->rx_comp.flags;
		skb = rx_buff->skb;
		skb_copy_to_linear_data(skb, rx_buff->data + offset,
					length);
		skb->vlan_tci = be16_to_cpu(next->rx_comp.vlan_tci);
		/* free the entry */
		next->rx_comp.first = 0;
		remove_buff_from_pool(adapter, rx_buff);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, netdev);

		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		length = skb->len;
		napi_gro_receive(napi, skb); /* send it up */
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += length;
		frames_processed++;
	}
	replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);

	if (frames_processed < budget) {
		enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
		napi_complete_done(napi, frames_processed);
		if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
		    napi_reschedule(napi)) {
			disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
			goto restart_poll;
		}
	}
	return frames_processed;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmvnic_netpoll_controller(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int i;

	replenish_pools(netdev_priv(dev));
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);
}
#endif

static const struct net_device_ops ibmvnic_netdev_ops = {
	.ndo_open		= ibmvnic_open,
	.ndo_stop		= ibmvnic_close,
	.ndo_start_xmit		= ibmvnic_xmit,
	.ndo_set_rx_mode	= ibmvnic_set_multi,
	.ndo_set_mac_address	= ibmvnic_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= ibmvnic_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ibmvnic_netpoll_controller,
#endif
};

/* ethtool functions */

static int ibmvnic_get_link_ksettings(struct net_device *netdev,
				      struct ethtool_link_ksettings *cmd)
{
	u32 supported, advertising;

	supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
		     SUPPORTED_FIBRE);
	advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
		       ADVERTISED_FIBRE);
	cmd->base.speed = SPEED_1000;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_FIBRE;
	cmd->base.phy_address = 0;
	cmd->base.autoneg = AUTONEG_ENABLE;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}

static void ibmvnic_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
	strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
}

static u32 ibmvnic_get_msglevel(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

static u32 ibmvnic_get_link(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* Don't need to send a query because we request a logical link up at
	 * init and then we wait for link state indications
	 */
	return adapter->logical_link_state;
}

static void ibmvnic_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring)
{
	ring->rx_max_pending = 0;
	ring->tx_max_pending = 0;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = 0;
	ring->tx_pending = 0;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
}

static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmvnic_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void ibmvnic_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	union ibmvnic_crq crq;
	int i;

	memset(&crq, 0, sizeof(crq));
	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
	crq.request_statistics.cmd = REQUEST_STATISTICS;
	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
	crq.request_statistics.len =
	    cpu_to_be32(sizeof(struct ibmvnic_statistics));

	/* Wait for data to be written */
	init_completion(&adapter->stats_done);
	ibmvnic_send_crq(adapter, &crq);
	wait_for_completion(&adapter->stats_done);

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
		data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
}

static const struct ethtool_ops ibmvnic_ethtool_ops = {
	.get_drvinfo		= ibmvnic_get_drvinfo,
	.get_msglevel		= ibmvnic_get_msglevel,
	.set_msglevel		= ibmvnic_set_msglevel,
	.get_link		= ibmvnic_get_link,
	.get_ringparam		= ibmvnic_get_ringparam,
	.get_strings		= ibmvnic_get_strings,
	.get_sset_count		= ibmvnic_get_sset_count,
	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
	.get_link_ksettings	= ibmvnic_get_link_ksettings,
};

/* Routines for managing CRQs/sCRQs */

static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");

	/* Close the sub-crqs */
	do {
		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
					adapter->vdev->unit_address,
					scrq->crq_num);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc) {
		netdev_err(adapter->netdev,
			   "Failed to release sub-CRQ %16lx, rc = %ld\n",
			   scrq->crq_num, rc);
	}

	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)scrq->msgs, 2);
	kfree(scrq);
}

static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
							*adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int rc;

	scrq = kmalloc(sizeof(*scrq), GFP_ATOMIC);
	if (!scrq)
		return NULL;

	scrq->msgs = (union sub_crq *)__get_free_pages(GFP_ATOMIC, 2);
	if (!scrq->msgs) {
		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
		goto zero_page_failed;
	}
	memset(scrq->msgs, 0, 4 * PAGE_SIZE);
1373
1374 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
1375 DMA_BIDIRECTIONAL);
1376 if (dma_mapping_error(dev, scrq->msg_token)) {
1377 dev_warn(dev, "Couldn't map crq queue messages page\n");
1378 goto map_failed;
1379 }
1380
1381 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
1382 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
1383
1384 if (rc == H_RESOURCE)
1385 rc = ibmvnic_reset_crq(adapter);
1386
1387 if (rc == H_CLOSED) {
1388 dev_warn(dev, "Partner adapter not ready, waiting.\n");
1389 } else if (rc) {
1390 dev_warn(dev, "Error %d registering sub-crq\n", rc);
1391 goto reg_failed;
1392 }
1393
Thomas Falcon032c5e82015-12-21 11:26:06 -06001394 scrq->adapter = adapter;
1395 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
1396 scrq->cur = 0;
Thomas Falcon142c0ac2017-03-05 12:18:41 -06001397 atomic_set(&scrq->used, 0);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001398 scrq->rx_skb_top = NULL;
1399 spin_lock_init(&scrq->lock);
1400
1401 netdev_dbg(adapter->netdev,
1402 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
1403 scrq->crq_num, scrq->hw_irq, scrq->irq);
1404
1405 return scrq;
1406
Thomas Falcon032c5e82015-12-21 11:26:06 -06001407reg_failed:
1408 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
1409 DMA_BIDIRECTIONAL);
1410map_failed:
1411 free_pages((unsigned long)scrq->msgs, 2);
1412zero_page_failed:
1413 kfree(scrq);
1414
1415 return NULL;
1416}
1417
1418static void release_sub_crqs(struct ibmvnic_adapter *adapter)
1419{
1420 int i;
1421
1422 if (adapter->tx_scrq) {
Nathan Fontenotb5108882017-03-30 02:49:18 -04001423 for (i = 0; i < adapter->req_tx_queues; i++) {
1424 if (!adapter->tx_scrq[i])
1425 continue;
1426
1427 if (adapter->tx_scrq[i]->irq) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06001428 free_irq(adapter->tx_scrq[i]->irq,
1429 adapter->tx_scrq[i]);
Thomas Falcon88eb98a2016-07-06 15:35:16 -05001430 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
Nathan Fontenotb5108882017-03-30 02:49:18 -04001431 adapter->tx_scrq[i]->irq = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001432 }
Nathan Fontenotb5108882017-03-30 02:49:18 -04001433
1434 release_sub_crq_queue(adapter, adapter->tx_scrq[i]);
1435 }
1436
Nathan Fontenot9501df32017-03-15 23:38:07 -04001437 kfree(adapter->tx_scrq);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001438 adapter->tx_scrq = NULL;
1439 }
1440
1441 if (adapter->rx_scrq) {
Nathan Fontenotb5108882017-03-30 02:49:18 -04001442 for (i = 0; i < adapter->req_rx_queues; i++) {
1443 if (!adapter->rx_scrq[i])
1444 continue;
1445
1446 if (adapter->rx_scrq[i]->irq) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06001447 free_irq(adapter->rx_scrq[i]->irq,
1448 adapter->rx_scrq[i]);
Thomas Falcon88eb98a2016-07-06 15:35:16 -05001449 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
Nathan Fontenotb5108882017-03-30 02:49:18 -04001450 adapter->rx_scrq[i]->irq = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001451 }
Nathan Fontenotb5108882017-03-30 02:49:18 -04001452
1453 release_sub_crq_queue(adapter, adapter->rx_scrq[i]);
1454 }
1455
Nathan Fontenot9501df32017-03-15 23:38:07 -04001456 kfree(adapter->rx_scrq);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001457 adapter->rx_scrq = NULL;
1458 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06001459}
1460
1461static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
1462 struct ibmvnic_sub_crq_queue *scrq)
1463{
1464 struct device *dev = &adapter->vdev->dev;
1465 unsigned long rc;
1466
1467 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
1468 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
1469 if (rc)
1470 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
1471 scrq->hw_irq, rc);
1472 return rc;
1473}
1474
1475static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
1476 struct ibmvnic_sub_crq_queue *scrq)
1477{
1478 struct device *dev = &adapter->vdev->dev;
1479 unsigned long rc;
1480
1481 if (scrq->hw_irq > 0x100000000ULL) {
1482 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
1483 return 1;
1484 }
1485
1486 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
1487 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
1488 if (rc)
1489 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
1490 scrq->hw_irq, rc);
1491 return rc;
1492}
1493
1494static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
1495 struct ibmvnic_sub_crq_queue *scrq)
1496{
1497 struct device *dev = &adapter->vdev->dev;
1498 struct ibmvnic_tx_buff *txbuff;
1499 union sub_crq *next;
1500 int index;
1501 int i, j;
Thomas Falconad7775d2016-04-01 17:20:34 -05001502 u8 first;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001503
1504restart_loop:
1505 while (pending_scrq(adapter, scrq)) {
1506 unsigned int pool = scrq->pool_index;
1507
1508 next = ibmvnic_next_scrq(adapter, scrq);
1509 for (i = 0; i < next->tx_comp.num_comps; i++) {
1510 if (next->tx_comp.rcs[i]) {
1511 dev_err(dev, "tx error %x\n",
1512 next->tx_comp.rcs[i]);
1513 continue;
1514 }
1515 index = be32_to_cpu(next->tx_comp.correlators[i]);
1516 txbuff = &adapter->tx_pool[pool].tx_buff[index];
1517
1518 for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
1519 if (!txbuff->data_dma[j])
1520 continue;
1521
1522 txbuff->data_dma[j] = 0;
1523 txbuff->used_bounce = false;
1524 }
Thomas Falconad7775d2016-04-01 17:20:34 -05001525 /* if sub_crq was sent indirectly */
1526 first = txbuff->indir_arr[0].generic.first;
1527 if (first == IBMVNIC_CRQ_CMD) {
1528 dma_unmap_single(dev, txbuff->indir_dma,
1529 sizeof(txbuff->indir_arr),
1530 DMA_TO_DEVICE);
1531 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06001532
Thomas Falcon142c0ac2017-03-05 12:18:41 -06001533 if (txbuff->last_frag) {
Brian King58c8c0c2017-04-19 13:44:47 -04001534 if (atomic_sub_return(next->tx_comp.num_comps,
1535 &scrq->used) <=
Thomas Falcon142c0ac2017-03-05 12:18:41 -06001536 (adapter->req_tx_entries_per_subcrq / 2) &&
1537 netif_subqueue_stopped(adapter->netdev,
1538 txbuff->skb)) {
1539 netif_wake_subqueue(adapter->netdev,
1540 scrq->pool_index);
1541 netdev_dbg(adapter->netdev,
1542 "Started queue %d\n",
1543 scrq->pool_index);
1544 }
1545
Thomas Falcon032c5e82015-12-21 11:26:06 -06001546 dev_kfree_skb_any(txbuff->skb);
Thomas Falcon142c0ac2017-03-05 12:18:41 -06001547 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06001548
1549 adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
1550 producer_index] = index;
1551 adapter->tx_pool[pool].producer_index =
1552 (adapter->tx_pool[pool].producer_index + 1) %
Thomas Falcon068d9f92017-03-05 12:18:42 -06001553 adapter->req_tx_entries_per_subcrq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001554 }
1555 /* remove tx_comp scrq*/
1556 next->tx_comp.first = 0;
1557 }
1558
1559 enable_scrq_irq(adapter, scrq);
1560
1561 if (pending_scrq(adapter, scrq)) {
1562 disable_scrq_irq(adapter, scrq);
1563 goto restart_loop;
1564 }
1565
1566 return 0;
1567}
1568
static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	disable_scrq_irq(adapter, scrq);
	ibmvnic_complete_tx(adapter, scrq);

	return IRQ_HANDLED;
}

static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
		disable_scrq_irq(adapter, scrq);
		__napi_schedule(&adapter->napi[scrq->scrq_num]);
	}

	return IRQ_HANDLED;
}

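/* Create an IRQ mapping and register a handler for every tx and rx sub-CRQ.
 * The error paths unwind only the IRQs that were actually requested and then
 * release all sub-CRQs.
 */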
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int i = 0, j = 0;
	int rc = 0;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		scrq = adapter->tx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);

		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_tx_irq_failed;
		}

		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
				 0, "ibmvnic_tx", scrq);

		if (rc) {
			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_tx_irq_failed;
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		scrq = adapter->rx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_rx_irq_failed;
		}
		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
				 0, "ibmvnic_rx", scrq);
		if (rc) {
			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_rx_irq_failed;
		}
	}
	return rc;

req_rx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
	}
	i = adapter->req_tx_queues;
req_tx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
	}
	release_sub_crqs(adapter);
	return rc;
}

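/* Allocate tx and rx sub-CRQs, trimming the requested queue counts toward
 * the server-reported minimums when not every queue could be allocated, then
 * send one REQUEST_CAPABILITY CRQ per negotiated value. Called with retry
 * set when renegotiating after a PARTIALSUCCESS response.
 */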
static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue **allqueues;
	int registered_queues = 0;
	union ibmvnic_crq crq;
	int total_queues;
	int more = 0;
	int i;

	if (!retry) {
		/* Sub-CRQ entries are 32 bytes long; each queue holds four
		 * pages' worth of entries
		 */
		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);

		if (adapter->min_tx_entries_per_subcrq > entries_page ||
		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
			goto allqueues_failed;
		}

		/* Use the minimum of the queried max and the entries that
		 * fit in our four-page queue
		 */
		adapter->req_tx_entries_per_subcrq =
		    adapter->max_tx_entries_per_subcrq > entries_page ?
		    entries_page : adapter->max_tx_entries_per_subcrq;
		adapter->req_rx_add_entries_per_subcrq =
		    adapter->max_rx_add_entries_per_subcrq > entries_page ?
		    entries_page : adapter->max_rx_add_entries_per_subcrq;

		adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues;
		adapter->req_rx_queues = adapter->opt_rx_comp_queues;
		adapter->req_rx_add_queues = adapter->max_rx_add_queues;

		adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
	}

	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;

	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_ATOMIC);
	if (!allqueues)
		goto allqueues_failed;

	for (i = 0; i < total_queues; i++) {
		allqueues[i] = init_sub_crq_queue(adapter);
		if (!allqueues[i]) {
			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
			break;
		}
		registered_queues++;
	}

	/* Make sure we were able to register the minimum number of queues */
	if (registered_queues <
	    adapter->min_tx_queues + adapter->min_rx_queues) {
		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
		goto tx_failed;
	}

	/* Distribute the queues we failed to allocate between tx and rx */
	for (i = 0; i < total_queues - registered_queues + more; i++) {
		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
		switch (i % 3) {
		case 0:
			if (adapter->req_rx_queues > adapter->min_rx_queues)
				adapter->req_rx_queues--;
			else
				more++;
			break;
		case 1:
			if (adapter->req_tx_queues > adapter->min_tx_queues)
				adapter->req_tx_queues--;
			else
				more++;
			break;
		}
	}

	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
				   sizeof(*adapter->tx_scrq), GFP_ATOMIC);
	if (!adapter->tx_scrq)
		goto tx_failed;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		adapter->tx_scrq[i] = allqueues[i];
		adapter->tx_scrq[i]->pool_index = i;
	}

	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
				   sizeof(*adapter->rx_scrq), GFP_ATOMIC);
	if (!adapter->rx_scrq)
		goto rx_failed;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
		adapter->rx_scrq[i]->scrq_num = i;
	}

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	if (adapter->netdev->flags & IFF_PROMISC) {
		if (adapter->promisc_supported) {
			crq.request_capability.capability =
			    cpu_to_be16(PROMISC_REQUESTED);
			crq.request_capability.number = cpu_to_be64(1);
			atomic_inc(&adapter->running_cap_crqs);
			ibmvnic_send_crq(adapter, &crq);
		}
	} else {
		crq.request_capability.capability =
		    cpu_to_be16(PROMISC_REQUESTED);
		crq.request_capability.number = cpu_to_be64(0);
		atomic_inc(&adapter->running_cap_crqs);
		ibmvnic_send_crq(adapter, &crq);
	}

	kfree(allqueues);

	return;

rx_failed:
	kfree(adapter->tx_scrq);
	adapter->tx_scrq = NULL;
tx_failed:
	for (i = 0; i < registered_queues; i++)
		release_sub_crq_queue(adapter, allqueues[i]);
	kfree(allqueues);
allqueues_failed:
	ibmvnic_remove(adapter->vdev);
}

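/* Queue cursor helpers. An entry is owned by the driver once the server has
 * set IBMVNIC_CRQ_CMD_RSP in its first byte; consuming an entry advances the
 * cursor, wrapping at the end of the queue.
 */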
static int pending_scrq(struct ibmvnic_adapter *adapter,
			struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry = &scrq->msgs[scrq->cur];

	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP || adapter->closing)
		return 1;
	else
		return 0;
}

static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
					struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry;
	unsigned long flags;

	spin_lock_irqsave(&scrq->lock, flags);
	entry = &scrq->msgs[scrq->cur];
	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++scrq->cur == scrq->size)
			scrq->cur = 0;
	} else {
		entry = NULL;
	}
	spin_unlock_irqrestore(&scrq->lock, flags);

	return entry;
}

static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;

	crq = &queue->msgs[queue->cur];
	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++queue->cur == queue->size)
			queue->cur = 0;
	} else {
		crq = NULL;
	}

	return crq;
}

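/* Hand a 32-byte sub-CRQ descriptor to the hypervisor. Each 64-bit word is
 * passed big-endian as a separate hcall argument, and the memory barrier
 * keeps the descriptor from being reordered past the hcall. The indirect
 * variant passes the I/O bus address of a descriptor array instead.
 */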
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)sub_crq;
	int rc;

	netdev_dbg(adapter->netdev,
		   "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(remote_handle),
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]),
		   (unsigned long int)cpu_to_be64(u64_crq[2]),
		   (unsigned long int)cpu_to_be64(u64_crq[3]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
				cpu_to_be64(remote_handle),
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]),
				cpu_to_be64(u64_crq[2]),
				cpu_to_be64(u64_crq[3]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}

static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
				u64 remote_handle, u64 ioba, u64 num_entries)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	int rc;

	/* Make sure the hypervisor sees the complete request */
	mb();
	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
				cpu_to_be64(remote_handle),
				ioba, num_entries);

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
	}

	return rc;
}

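/* Send a single 16-byte CRQ command, again as big-endian words. H_CLOSED
 * means the partner's queue is not open yet, which can happen while the
 * connection is still being established.
 */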
static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
			    union ibmvnic_crq *crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	int rc;

	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_warn(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}

static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT;
	netdev_dbg(adapter->netdev, "Sending CRQ init\n");

	return ibmvnic_send_crq(adapter, &crq);
}

static int ibmvnic_send_crq_init_complete(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
	netdev_dbg(adapter->netdev, "Sending CRQ init complete\n");

	return ibmvnic_send_crq(adapter, &crq);
}

static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
	crq.version_exchange.cmd = VERSION_EXCHANGE;
	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

	return ibmvnic_send_crq(adapter, &crq);
}

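/* Build and send the LOGIN request: a DMA-mapped buffer listing the sub-CRQ
 * handles for every tx and rx queue, plus a response buffer for the server
 * to fill in. Both buffers stay mapped until the LOGIN_RSP is processed.
 */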
static void send_login(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
	struct ibmvnic_login_buffer *login_buffer;
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t rsp_buffer_token;
	dma_addr_t buffer_token;
	size_t rsp_buffer_size;
	union ibmvnic_crq crq;
	size_t buffer_size;
	__be64 *tx_list_p;
	__be64 *rx_list_p;
	int i;

	buffer_size =
	    sizeof(struct ibmvnic_login_buffer) +
	    sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues);

	login_buffer = kmalloc(buffer_size, GFP_ATOMIC);
	if (!login_buffer)
		goto buf_alloc_failed;

	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buffer_token)) {
		dev_err(dev, "Couldn't map login buffer\n");
		goto buf_map_failed;
	}

	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
			  sizeof(u64) * adapter->req_tx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;

	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
	if (!login_rsp_buffer)
		goto buf_rsp_alloc_failed;

	rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
					  rsp_buffer_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, rsp_buffer_token)) {
		dev_err(dev, "Couldn't map login rsp buffer\n");
		goto buf_rsp_map_failed;
	}

	adapter->login_buf = login_buffer;
	adapter->login_buf_token = buffer_token;
	adapter->login_buf_sz = buffer_size;
	adapter->login_rsp_buf = login_rsp_buffer;
	adapter->login_rsp_buf_token = rsp_buffer_token;
	adapter->login_rsp_buf_sz = rsp_buffer_size;

	login_buffer->len = cpu_to_be32(buffer_size);
	login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
	login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
	login_buffer->off_txcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
	login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
	login_buffer->off_rxcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
			sizeof(u64) * adapter->req_tx_queues);
	login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
	login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);

	tx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer));
	rx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer) +
			       sizeof(u64) * adapter->req_tx_queues);

	for (i = 0; i < adapter->req_tx_queues; i++) {
		if (adapter->tx_scrq[i]) {
			tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
						   crq_num);
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		if (adapter->rx_scrq[i]) {
			rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
						   crq_num);
		}
	}

	netdev_dbg(adapter->netdev, "Login Buffer:\n");
	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_buf))[i]);
	}

	memset(&crq, 0, sizeof(crq));
	crq.login.first = IBMVNIC_CRQ_CMD;
	crq.login.cmd = LOGIN;
	crq.login.ioba = cpu_to_be32(buffer_token);
	crq.login.len = cpu_to_be32(buffer_size);
	ibmvnic_send_crq(adapter, &crq);

	return;

buf_rsp_map_failed:
	kfree(login_rsp_buffer);
buf_rsp_alloc_failed:
	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
buf_map_failed:
	kfree(login_buffer);
buf_alloc_failed:
	return;
}

static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
			     u32 len, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_map.first = IBMVNIC_CRQ_CMD;
	crq.request_map.cmd = REQUEST_MAP;
	crq.request_map.map_id = map_id;
	crq.request_map.ioba = cpu_to_be32(addr);
	crq.request_map.len = cpu_to_be32(len);
	ibmvnic_send_crq(adapter, &crq);
}

static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
	crq.request_unmap.cmd = REQUEST_UNMAP;
	crq.request_unmap.map_id = map_id;
	ibmvnic_send_crq(adapter, &crq);
}

static void send_map_query(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.query_map.first = IBMVNIC_CRQ_CMD;
	crq.query_map.cmd = QUERY_MAP;
	ibmvnic_send_crq(adapter, &crq);
}

/* Send a series of CRQs requesting various capabilities of the VNIC server */
static void send_cap_queries(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	atomic_set(&adapter->running_cap_crqs, 0);
	memset(&crq, 0, sizeof(crq));
	crq.query_capability.first = IBMVNIC_CRQ_CMD;
	crq.query_capability.cmd = QUERY_CAPABILITY;

	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);
}

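/* Parse the QUERY_IP_OFFLOAD response, derive the netdev checksum feature
 * flags from the reported capabilities, and push the chosen settings back to
 * the server with a CONTROL_IP_OFFLOAD command.
 */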
static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
	union ibmvnic_crq crq;
	int i;

	dma_unmap_single(dev, adapter->ip_offload_tok,
			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);

	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(buf))[i]);

	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
		   buf->tcp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
		   buf->tcp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
		   buf->udp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
		   buf->udp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
		   buf->large_tx_ipv4);
	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
		   buf->large_tx_ipv6);
	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
		   buf->large_rx_ipv4);
	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
		   buf->large_rx_ipv6);
	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
		   buf->max_ipv4_header_size);
	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
		   buf->max_ipv6_header_size);
	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
		   buf->max_tcp_header_size);
	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
		   buf->max_udp_header_size);
	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
		   buf->max_large_tx_size);
	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
		   buf->max_large_rx_size);
	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
		   buf->ipv6_extension_header);
	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
		   buf->tcp_pseudosum_req);
	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
		   buf->num_ipv6_ext_headers);
	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
		   buf->off_ipv6_ext_headers);

	adapter->ip_offload_ctrl_tok =
	    dma_map_single(dev, &adapter->ip_offload_ctrl,
			   sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);

	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
		dev_err(dev, "Couldn't map ip offload control buffer\n");
		return;
	}

	adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
	adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
	adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
	adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
	adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;

	/* large_tx/rx disabled for now, additional features needed */
	adapter->ip_offload_ctrl.large_tx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_tx_ipv6 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv6 = 0;

	adapter->netdev->features = NETIF_F_GSO;

	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
		adapter->netdev->features |= NETIF_F_IP_CSUM;

	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
		adapter->netdev->features |= NETIF_F_IPV6_CSUM;

	if ((adapter->netdev->features &
	    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
		adapter->netdev->features |= NETIF_F_RXCSUM;

	memset(&crq, 0, sizeof(crq));
	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
	crq.control_ip_offload.len =
	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
	ibmvnic_send_crq(adapter, &crq);
}

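/* Firmware error reporting is a two-step exchange: handle_error_indication()
 * queues a DMA-mapped detail buffer and asks the server to fill it with
 * REQUEST_ERROR_INFO; handle_error_info_rsp() then locates the buffer by
 * error id, dumps its contents, and frees it.
 */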
static void handle_error_info_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp;
	unsigned long flags;
	bool found = false;
	int i;

	if (!crq->request_error_rsp.rc.code) {
		dev_info(dev, "Request Error Rsp returned with rc=%x\n",
			 crq->request_error_rsp.rc.code);
		return;
	}

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
		if (error_buff->error_id == crq->request_error_rsp.error_id) {
			found = true;
			list_del(&error_buff->list);
			break;
		}
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	if (!found) {
		dev_err(dev, "Couldn't find error id %x\n",
			be32_to_cpu(crq->request_error_rsp.error_id));
		return;
	}

	dev_err(dev, "Detailed info for error id %x:",
		be32_to_cpu(crq->request_error_rsp.error_id));

	for (i = 0; i < error_buff->len; i++) {
		pr_cont("%02x", (int)error_buff->buff[i]);
		if (i % 8 == 7)
			pr_cont(" ");
	}
	pr_cont("\n");

	dma_unmap_single(dev, error_buff->dma, error_buff->len,
			 DMA_FROM_DEVICE);
	kfree(error_buff->buff);
	kfree(error_buff);
}

static void handle_error_indication(union ibmvnic_crq *crq,
				    struct ibmvnic_adapter *adapter)
{
	int detail_len = be32_to_cpu(crq->error_indication.detail_error_sz);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff;
	union ibmvnic_crq new_crq;
	unsigned long flags;

	dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
		crq->error_indication.
		    flags & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
		be32_to_cpu(crq->error_indication.error_id),
		be16_to_cpu(crq->error_indication.error_cause));

	error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
	if (!error_buff)
		return;

	error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
	if (!error_buff->buff) {
		kfree(error_buff);
		return;
	}

	error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, error_buff->dma)) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(dev, "Couldn't map error buffer\n");
		kfree(error_buff->buff);
		kfree(error_buff);
		return;
	}

	error_buff->len = detail_len;
	error_buff->error_id = crq->error_indication.error_id;

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_add_tail(&error_buff->list, &adapter->errors);
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	memset(&new_crq, 0, sizeof(new_crq));
	new_crq.request_error_info.first = IBMVNIC_CRQ_CMD;
	new_crq.request_error_info.cmd = REQUEST_ERROR_INFO;
	new_crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
	new_crq.request_error_info.len = cpu_to_be32(detail_len);
	new_crq.request_error_info.error_id = crq->error_indication.error_id;
	ibmvnic_send_crq(adapter, &new_crq);
}

static void handle_change_mac_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->change_mac_addr_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
		return;
	}
	memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
	       ETH_ALEN);
}

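/* Process a REQUEST_CAPABILITY response. PARTIALSUCCESS means the server
 * granted less than was requested, so adopt the server's value, release the
 * sub-CRQs, and renegotiate. When the last outstanding request completes,
 * move on to querying IP offload support.
 */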
static void handle_request_cap_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u64 *req_value;
	char *name;

	atomic_dec(&adapter->running_cap_crqs);
	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
	case REQ_TX_QUEUES:
		req_value = &adapter->req_tx_queues;
		name = "tx";
		break;
	case REQ_RX_QUEUES:
		req_value = &adapter->req_rx_queues;
		name = "rx";
		break;
	case REQ_RX_ADD_QUEUES:
		req_value = &adapter->req_rx_add_queues;
		name = "rx_add";
		break;
	case REQ_TX_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_tx_entries_per_subcrq;
		name = "tx_entries_per_subcrq";
		break;
	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_rx_add_entries_per_subcrq;
		name = "rx_add_entries_per_subcrq";
		break;
	case REQ_MTU:
		req_value = &adapter->req_mtu;
		name = "mtu";
		break;
	case PROMISC_REQUESTED:
		req_value = &adapter->promisc;
		name = "promisc";
		break;
	default:
		dev_err(dev, "Got invalid cap request rsp %d\n",
			crq->request_capability.capability);
		return;
	}

	switch (crq->request_capability_rsp.rc.code) {
	case SUCCESS:
		break;
	case PARTIALSUCCESS:
		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
			 *req_value,
			 (long int)be64_to_cpu(crq->request_capability_rsp.
					       number), name);
		release_sub_crqs(adapter);
		*req_value = be64_to_cpu(crq->request_capability_rsp.number);
		init_sub_crqs(adapter, 1);
		return;
	default:
		dev_err(dev, "Error %d in request cap rsp\n",
			crq->request_capability_rsp.rc.code);
		return;
	}

	/* Done receiving requested capabilities, query IP offload support */
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		union ibmvnic_crq newcrq;
		int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
		struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
		    &adapter->ip_offload_buf;

		adapter->wait_capability = false;
		adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
							 buf_sz,
							 DMA_FROM_DEVICE);

		if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "Couldn't map offload buffer\n");
			return;
		}

		memset(&newcrq, 0, sizeof(newcrq));
		newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
		newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
		newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
		newcrq.query_ip_offload.ioba =
		    cpu_to_be32(adapter->ip_offload_tok);

		ibmvnic_send_crq(adapter, &newcrq);
	}
}

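/* Validate the LOGIN response. A nonzero return code means the server could
 * not grant the requested queues, so flag a renegotiation; queue counts that
 * disagree with what was requested are treated as fatal.
 */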
static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
			    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
	struct ibmvnic_login_buffer *login = adapter->login_buf;
	int i;

	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
			 DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, adapter->login_rsp_buf_token,
			 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);

	/* If the number of queues requested can't be allocated by the
	 * server, the login response will return with code 1. We will need
	 * to resend the login buffer with fewer queues requested.
	 */
	if (login_rsp_crq->generic.rc.code) {
		adapter->renegotiate = true;
		complete(&adapter->init_done);
		return 0;
	}

	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_rsp_buf))[i]);
	}

	/* Sanity checks */
	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
	     adapter->req_rx_add_queues !=
	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
		ibmvnic_remove(adapter->vdev);
		return -EIO;
	}
	complete(&adapter->init_done);

	return 0;
}

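/* Map-related responses. On a failed REQUEST_MAP the map id is rolled back
 * and cleared from whichever tx/rx pool long term buffer was using it.
 */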
static void handle_request_map_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u8 map_id = crq->request_map_rsp.map_id;
	int tx_subcrqs;
	int rx_subcrqs;
	long rc;
	int i;

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	rx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);

	rc = crq->request_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in REQUEST_MAP_RSP\n", rc);
		adapter->map_id--;
		/* need to find and zero tx/rx_pool map_id */
		for (i = 0; i < tx_subcrqs; i++) {
			if (adapter->tx_pool[i].long_term_buff.map_id == map_id)
				adapter->tx_pool[i].long_term_buff.map_id = 0;
		}
		for (i = 0; i < rx_subcrqs; i++) {
			if (adapter->rx_pool[i].long_term_buff.map_id == map_id)
				adapter->rx_pool[i].long_term_buff.map_id = 0;
		}
	}
	complete(&adapter->fw_done);
}

static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
				     struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}

static void handle_query_map_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}
	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
		   crq->query_map_rsp.free_pages);
}

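/* Record a single QUERY_CAPABILITY response in the adapter structure. Once
 * the last outstanding query completes, sub-CRQ initialization can begin.
 */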
static void handle_query_cap_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	atomic_dec(&adapter->running_cap_crqs);
	netdev_dbg(netdev, "Outstanding queries: %d\n",
		   atomic_read(&adapter->running_cap_crqs));
	rc = crq->query_capability.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
		goto out;
	}

	switch (be16_to_cpu(crq->query_capability.capability)) {
	case MIN_TX_QUEUES:
		adapter->min_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_queues = %lld\n",
			   adapter->min_tx_queues);
		break;
	case MIN_RX_QUEUES:
		adapter->min_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_queues = %lld\n",
			   adapter->min_rx_queues);
		break;
	case MIN_RX_ADD_QUEUES:
		adapter->min_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
			   adapter->min_rx_add_queues);
		break;
	case MAX_TX_QUEUES:
		adapter->max_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_queues = %lld\n",
			   adapter->max_tx_queues);
		break;
	case MAX_RX_QUEUES:
		adapter->max_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_queues = %lld\n",
			   adapter->max_rx_queues);
		break;
	case MAX_RX_ADD_QUEUES:
		adapter->max_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
			   adapter->max_rx_add_queues);
		break;
	case MIN_TX_ENTRIES_PER_SUBCRQ:
		adapter->min_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
			   adapter->min_tx_entries_per_subcrq);
		break;
	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->min_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->min_rx_add_entries_per_subcrq);
		break;
	case MAX_TX_ENTRIES_PER_SUBCRQ:
		adapter->max_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
			   adapter->max_tx_entries_per_subcrq);
		break;
	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->max_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->max_rx_add_entries_per_subcrq);
		break;
	case TCP_IP_OFFLOAD:
		adapter->tcp_ip_offload =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
			   adapter->tcp_ip_offload);
		break;
	case PROMISC_SUPPORTED:
		adapter->promisc_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "promisc_supported = %lld\n",
			   adapter->promisc_supported);
		break;
	case MIN_MTU:
		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
		break;
	case MAX_MTU:
		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
		break;
	case MAX_MULTICAST_FILTERS:
		adapter->max_multicast_filters =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
			   adapter->max_multicast_filters);
		break;
	case VLAN_HEADER_INSERTION:
		adapter->vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		if (adapter->vlan_header_insertion)
			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
			   adapter->vlan_header_insertion);
		break;
	case MAX_TX_SG_ENTRIES:
		adapter->max_tx_sg_entries =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
			   adapter->max_tx_sg_entries);
		break;
	case RX_SG_SUPPORTED:
		adapter->rx_sg_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
			   adapter->rx_sg_supported);
		break;
	case OPT_TX_COMP_SUB_QUEUES:
		adapter->opt_tx_comp_sub_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
			   adapter->opt_tx_comp_sub_queues);
		break;
	case OPT_RX_COMP_QUEUES:
		adapter->opt_rx_comp_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
			   adapter->opt_rx_comp_queues);
		break;
	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
		adapter->opt_rx_bufadd_q_per_rx_comp_q =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
		break;
	case OPT_TX_ENTRIES_PER_SUBCRQ:
		adapter->opt_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
			   adapter->opt_tx_entries_per_subcrq);
		break;
	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
		adapter->opt_rxba_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
			   adapter->opt_rxba_entries_per_subcrq);
		break;
	case TX_RX_DESC_REQ:
		adapter->tx_rx_desc_req = crq->query_capability.number;
		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
			   adapter->tx_rx_desc_req);
		break;

	default:
		netdev_err(netdev, "Got invalid cap rsp %d\n",
			   crq->query_capability.capability);
	}

out:
	/* We're done querying the capabilities, initialize sub-crqs */
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		adapter->wait_capability = false;
		init_sub_crqs(adapter, 0);
	}
}

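/* Worker for transport events: releases the sub-CRQs and, after a partition
 * migration, re-enables the main CRQ and restarts the init handshake.
 */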
static void ibmvnic_xport_event(struct work_struct *work)
{
	struct ibmvnic_adapter *adapter = container_of(work,
						       struct ibmvnic_adapter,
						       ibmvnic_xport);
	struct device *dev = &adapter->vdev->dev;
	long rc;

	release_sub_crqs(adapter);
	if (adapter->migrated) {
		rc = ibmvnic_reenable_crq_queue(adapter);
		if (rc)
			dev_err(dev, "Error after enable rc=%ld\n", rc);
		adapter->migrated = false;
		rc = ibmvnic_send_crq_init(adapter);
		if (rc)
			dev_err(dev, "Error sending init rc=%ld\n", rc);
	}
}

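/* Top-level CRQ dispatcher. Transport lifecycle events (partner init,
 * migration, failover, adapter failure) are handled inline; command
 * responses are routed to the individual handlers.
 */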
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			/* Send back a response */
			rc = ibmvnic_send_crq_init_complete(adapter);
			if (!rc)
				schedule_work(&adapter->vnic_crq_init);
			else
				dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Re-enabling adapter\n");
			adapter->migrated = true;
			schedule_work(&adapter->ibmvnic_xport);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			netif_carrier_off(netdev);
			adapter->failover = true;
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			schedule_work(&adapter->ibmvnic_xport);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		dev_info(dev, "Partner protocol version is %d\n",
			 crq->version_exchange_rsp.version);
		if (be16_to_cpu(crq->version_exchange_rsp.version) <
		    ibmvnic_version)
			ibmvnic_version =
			    be16_to_cpu(crq->version_exchange_rsp.version);
		send_cap_queries(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		handle_request_map_rsp(crq, adapter);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev, "Got Logical Link State Response\n");
		adapter->logical_link_state =
		    crq->logical_link_state_rsp.link_state;
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
		    crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
		    crq->link_state_indication.logical_link_state;
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_ERROR_RSP:
		netdev_dbg(netdev, "Got Error Detail Response\n");
		handle_error_info_rsp(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		complete(&adapter->init_done);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}

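/* The main CRQ interrupt only schedules the tasklet. The tasklet drains
 * every valid message under the queue lock and keeps looping while
 * capability responses are still expected, so a burst of responses is
 * handled in one pass.
 */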
2984static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
2985{
2986 struct ibmvnic_adapter *adapter = instance;
Thomas Falcon6c267b32017-02-15 12:17:58 -06002987
Thomas Falcon6c267b32017-02-15 12:17:58 -06002988 tasklet_schedule(&adapter->tasklet);
Thomas Falcon6c267b32017-02-15 12:17:58 -06002989 return IRQ_HANDLED;
2990}
2991
static void ibmvnic_tasklet(unsigned long data)
{
	struct ibmvnic_adapter *adapter = (struct ibmvnic_adapter *)data;
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}

		/* remain in tasklet until all
		 * capabilities responses are received
		 */
		if (!adapter->wait_capability)
			done = true;
	}
	/* if capabilities CRQs were sent in this tasklet, the following
	 * tasklet must wait until all responses are received
	 */
	if (atomic_read(&adapter->running_cap_crqs) != 0)
		adapter->wait_capability = true;
	spin_unlock_irqrestore(&queue->lock, flags);
}

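/* Re-enable the CRQ via the H_ENABLE_CRQ hcall, retrying for as long as
 * the hypervisor reports the operation as busy or in progress.
 */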
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}

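/* Reset the CRQ: free the current queue registration with the
 * hypervisor, zero the message page, and register it again.  H_CLOSED
 * from H_REG_CRQ is not fatal here; it means the partner adapter has
 * not come up yet.
 */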
static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}

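/* Tear down the CRQ: release the interrupt and tasklet, free the queue
 * registration with the hypervisor, and unmap and free the message
 * page.  Safe to call when the queue was never allocated.
 */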
static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	if (!crq->msgs)
		return;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
}

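/* Allocate and register the CRQ: map a zeroed page for the message
 * queue, register it with the hypervisor, and hook up the interrupt
 * and tasklet.  H_RESOURCE on registration usually means a previous
 * owner (e.g. after a kexec) still holds the queue, so a reset is
 * attempted before giving up.
 */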
static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	if (crq->msgs)
		return 0;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */

	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	tasklet_init(&adapter->tasklet, ibmvnic_tasklet,
		     (unsigned long)adapter);

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
			 adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		retrc = rc;
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		retrc = rc;
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	return retrc;

req_irq_failed:
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	return retrc;
}

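/* Worker for the passive (server-initiated) init path.  On failover,
 * the sub-CRQs are released and, if the interface was running, it is
 * closed and reopened after the version exchange is redriven.  On the
 * initial pass, the netdev is registered once initialization completes.
 */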
static void handle_crq_init_rsp(struct work_struct *work)
{
	struct ibmvnic_adapter *adapter = container_of(work,
						       struct ibmvnic_adapter,
						       vnic_crq_init);
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(30000);
	bool restart = false;
	int rc;

	if (adapter->failover) {
		release_sub_crqs(adapter);
		if (netif_running(netdev)) {
			netif_tx_disable(netdev);
			ibmvnic_close(netdev);
			restart = true;
		}
	}

	reinit_completion(&adapter->init_done);
	send_version_xchg(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Passive init timeout\n");
		goto task_failed;
	}

	netdev->mtu = adapter->req_mtu - ETH_HLEN;

	if (adapter->failover) {
		adapter->failover = false;
		if (restart) {
			rc = ibmvnic_open(netdev);
			if (rc)
				goto restart_failed;
		}
		netif_carrier_on(netdev);
		return;
	}

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(dev,
			"failed to register netdev rc=%d\n", rc);
		goto register_failed;
	}
	dev_info(dev, "ibmvnic registered\n");

	return;

restart_failed:
	dev_err(dev, "Failed to restart ibmvnic, rc=%d\n", rc);
register_failed:
	release_sub_crqs(adapter);
task_failed:
	dev_err(dev, "Passive initialization was not successful\n");
}

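/* Bring up the CRQ and drive the initialization handshake: allocate
 * the statistics token, send the CRQ init request, and wait up to 30
 * seconds for the server to complete the exchange.
 */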
static int ibmvnic_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(30000);
	int rc;

	rc = init_crq_queue(adapter);
	if (rc) {
		dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
		return rc;
	}

	rc = init_stats_token(adapter);
	if (rc) {
		release_crq_queue(adapter);
		return rc;
	}

	init_completion(&adapter->init_done);
	ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		release_crq_queue(adapter);
		return -1;
	}

	return 0;
}

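/* Probe routine: read the MAC address from the device tree, allocate
 * the netdev and adapter state, run the initialization handshake, and
 * register the network device.
 */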
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return -EINVAL;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_TX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->failover = false;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);
	INIT_WORK(&adapter->ibmvnic_xport, ibmvnic_xport_event);

	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->errors);
	spin_lock_init(&adapter->error_list_lock);

	rc = ibmvnic_init(adapter);
	if (rc) {
		free_netdev(netdev);
		return rc;
	}

	netdev->mtu = adapter->req_mtu - ETH_HLEN;
	adapter->is_closed = false;

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		free_netdev(netdev);
		return rc;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	return 0;
}

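/* Remove routine: unregister the netdev, then release driver
 * resources, the sub-CRQs, and finally the CRQ itself before freeing
 * the netdev.
 */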
static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	unregister_netdev(netdev);

	release_resources(adapter);
	release_sub_crqs(adapter);
	release_crq_queue(adapter);

	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

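/* Estimate the DMA space this adapter needs so the VIO bus can size
 * the partition's IO entitlement: the CRQ page, the bounce buffer, the
 * statistics buffer, the sub-CRQ message queues, and every RX pool
 * buffer.  Before probe has run, fall back to the driver-wide default.
 */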
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += adapter->bounce_buffer_size;
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		ret += adapter->rx_pool[i].size *
		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}

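/* Resume handler: poke every RX sub-CRQ interrupt handler once in case
 * an interrupt was lost while the partition was suspended.
 */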
static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	/* kick the interrupt handlers just in case we lost an interrupt */
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);

	return 0;
}

static struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", ""}
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table       = ibmvnic_device_table,
	.probe          = ibmvnic_probe,
	.remove         = ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name           = ibmvnic_driver_name,
	.pm             = &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);