/**************************************************************************/
/*                                                                        */
/*  IBM System i and System p Virtual NIC Device Driver                  */
/*  Copyright (C) 2014 IBM Corp.                                         */
/*  Santiago Leon (santi_leon@yahoo.com)                                 */
/*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                          */
/*  John Allen (jallen@linux.vnet.ibm.com)                               */
/*                                                                        */
/*  This program is free software; you can redistribute it and/or modify */
/*  it under the terms of the GNU General Public License as published by */
/*  the Free Software Foundation; either version 2 of the License, or    */
/*  (at your option) any later version.                                  */
/*                                                                        */
/*  This program is distributed in the hope that it will be useful,      */
/*  but WITHOUT ANY WARRANTY; without even the implied warranty of       */
/*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the         */
/*  GNU General Public License for more details.                         */
/*                                                                        */
/*  You should have received a copy of the GNU General Public License    */
/*  along with this program.                                              */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device  */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN   */
/* option of the RS/6000 Platform Architecture to interface with virtual */
/* ethernet NICs that are presented to the partition by the hypervisor.  */
/*                                                                        */
/* Messages are passed between the VNIC driver and the VNIC server using */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but    */
/* are used by the driver to notify the server that a packet is          */
/* ready for transmission or that a buffer has been added to receive a   */
/* packet. Subsequently, sCRQs are used by the server to notify the      */
/* driver that a packet transmission has been completed or that a packet */
/* has been received and placed in a waiting buffer.                     */
/*                                                                        */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in   */
/* which skbs are DMA mapped and immediately unmapped when the transmit  */
/* or receive has been completed, the VNIC driver is required to use     */
/* "long term mapping". This entails that large, continuous DMA mapped   */
/* buffers are allocated on driver initialization and these buffers are  */
/* then continuously reused to pass skbs to and from the VNIC server.    */
/*                                                                        */
/**************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *);
static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_map_query(struct ibmvnic_adapter *adapter);
static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static void send_request_unmap(struct ibmvnic_adapter *, u8);
static void send_login(struct ibmvnic_adapter *adapter);
static void send_cap_queries(struct ibmvnic_adapter *adapter);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);

117struct ibmvnic_stat {
118 char name[ETH_GSTRING_LEN];
119 int offset;
120};
121
122#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
123 offsetof(struct ibmvnic_statistics, stat))
124#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
125
126static const struct ibmvnic_stat ibmvnic_stats[] = {
127 {"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
128 {"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
129 {"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
130 {"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
131 {"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
132 {"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
133 {"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
134 {"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
135 {"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
136 {"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
137 {"align_errors", IBMVNIC_STAT_OFF(align_errors)},
138 {"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
139 {"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
140 {"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
141 {"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
142 {"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
143 {"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
144 {"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
145 {"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
146 {"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
147 {"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
148 {"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
149};
150
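/* Thin wrapper around the H_REG_SUB_CRQ hcall: registers a sub-CRQ page with
 * the hypervisor and returns the assigned queue number and interrupt source
 * through @number and @irq.
 */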
151static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
152 unsigned long length, unsigned long *number,
153 unsigned long *irq)
154{
155 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
156 long rc;
157
158 rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
159 *number = retbuf[0];
160 *irq = retbuf[1];
161
162 return rc;
163}
164
165/* net_device_ops functions */
166
167static void init_rx_pool(struct ibmvnic_adapter *adapter,
168 struct ibmvnic_rx_pool *rx_pool, int num, int index,
169 int buff_size, int active)
170{
171 netdev_dbg(adapter->netdev,
172 "Initializing rx_pool %d, %d buffs, %d bytes each\n",
173 index, num, buff_size);
174 rx_pool->size = num;
175 rx_pool->index = index;
176 rx_pool->buff_size = buff_size;
177 rx_pool->active = active;
178}
179
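/* Allocate a DMA-coherent long term buffer and ask the VNIC server to map it
 * (send_request_map), blocking on fw_done until the server responds.
 */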
180static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
181 struct ibmvnic_long_term_buff *ltb, int size)
182{
183 struct device *dev = &adapter->vdev->dev;
184
185 ltb->size = size;
186 ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
187 GFP_KERNEL);
188
189 if (!ltb->buff) {
190 dev_err(dev, "Couldn't alloc long term buffer\n");
191 return -ENOMEM;
192 }
193 ltb->map_id = adapter->map_id;
	adapter->map_id++;

	init_completion(&adapter->fw_done);
	send_request_map(adapter, ltb->addr,
			 ltb->size, ltb->map_id);
	wait_for_completion(&adapter->fw_done);
200 return 0;
201}
202
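/* Free a long term buffer; the unmap request is only sent when the adapter
 * is not failing over.
 */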
203static void free_long_term_buff(struct ibmvnic_adapter *adapter,
204 struct ibmvnic_long_term_buff *ltb)
205{
206 struct device *dev = &adapter->vdev->dev;
207
208 dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
	if (!adapter->failover)
		send_request_unmap(adapter, ltb->map_id);
}
212
213static int alloc_rx_pool(struct ibmvnic_adapter *adapter,
214 struct ibmvnic_rx_pool *pool)
215{
216 struct device *dev = &adapter->vdev->dev;
217 int i;
218
219 pool->free_map = kcalloc(pool->size, sizeof(int), GFP_KERNEL);
220 if (!pool->free_map)
221 return -ENOMEM;
222
223 pool->rx_buff = kcalloc(pool->size, sizeof(struct ibmvnic_rx_buff),
224 GFP_KERNEL);
225
226 if (!pool->rx_buff) {
227 dev_err(dev, "Couldn't alloc rx buffers\n");
228 kfree(pool->free_map);
229 return -ENOMEM;
230 }
231
232 if (alloc_long_term_buff(adapter, &pool->long_term_buff,
233 pool->size * pool->buff_size)) {
234 kfree(pool->free_map);
235 kfree(pool->rx_buff);
236 return -ENOMEM;
237 }
238
239 for (i = 0; i < pool->size; ++i)
240 pool->free_map[i] = i;
241
242 atomic_set(&pool->available, 0);
243 pool->next_alloc = 0;
244 pool->next_free = 0;
245
246 return 0;
247}
248
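/* Refill an rx pool up to its size: allocate an skb for each free slot,
 * point the slot at its region of the long term buffer and advertise it to
 * the VNIC server with an rx_add descriptor on the sub-CRQ.
 */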
249static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
250 struct ibmvnic_rx_pool *pool)
251{
252 int count = pool->size - atomic_read(&pool->available);
253 struct device *dev = &adapter->vdev->dev;
254 int buffers_added = 0;
255 unsigned long lpar_rc;
256 union sub_crq sub_crq;
257 struct sk_buff *skb;
258 unsigned int offset;
259 dma_addr_t dma_addr;
260 unsigned char *dst;
261 u64 *handle_array;
262 int shift = 0;
263 int index;
264 int i;
265
266 handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
267 be32_to_cpu(adapter->login_rsp_buf->
268 off_rxadd_subcrqs));
269
270 for (i = 0; i < count; ++i) {
271 skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
272 if (!skb) {
273 dev_err(dev, "Couldn't replenish rx buff\n");
274 adapter->replenish_no_mem++;
275 break;
276 }
277
278 index = pool->free_map[pool->next_free];
279
280 if (pool->rx_buff[index].skb)
281 dev_err(dev, "Inconsistent free_map!\n");
282
283 /* Copy the skb to the long term mapped DMA buffer */
284 offset = index * pool->buff_size;
285 dst = pool->long_term_buff.buff + offset;
286 memset(dst, 0, pool->buff_size);
287 dma_addr = pool->long_term_buff.addr + offset;
288 pool->rx_buff[index].data = dst;
289
290 pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
291 pool->rx_buff[index].dma = dma_addr;
292 pool->rx_buff[index].skb = skb;
293 pool->rx_buff[index].pool_index = pool->index;
294 pool->rx_buff[index].size = pool->buff_size;
295
296 memset(&sub_crq, 0, sizeof(sub_crq));
297 sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
298 sub_crq.rx_add.correlator =
299 cpu_to_be64((u64)&pool->rx_buff[index]);
300 sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
301 sub_crq.rx_add.map_id = pool->long_term_buff.map_id;
302
303 /* The length field of the sCRQ is defined to be 24 bits so the
304 * buffer size needs to be left shifted by a byte before it is
305 * converted to big endian to prevent the last byte from being
306 * truncated.
307 */
308#ifdef __LITTLE_ENDIAN__
309 shift = 8;
310#endif
311 sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
312
313 lpar_rc = send_subcrq(adapter, handle_array[pool->index],
314 &sub_crq);
315 if (lpar_rc != H_SUCCESS)
316 goto failure;
317
318 buffers_added++;
319 adapter->replenish_add_buff_success++;
320 pool->next_free = (pool->next_free + 1) % pool->size;
321 }
322 atomic_add(buffers_added, &pool->available);
323 return;
324
325failure:
326 dev_info(dev, "replenish pools failure\n");
327 pool->free_map[pool->next_free] = index;
328 pool->rx_buff[index].skb = NULL;
329 if (!dma_mapping_error(dev, dma_addr))
330 dma_unmap_single(dev, dma_addr, pool->buff_size,
331 DMA_FROM_DEVICE);
332
333 dev_kfree_skb_any(skb);
334 adapter->replenish_add_buff_failure++;
335 atomic_add(buffers_added, &pool->available);
336}
337
338static void replenish_pools(struct ibmvnic_adapter *adapter)
339{
340 int i;
341
342 if (adapter->migrated)
343 return;
344
345 adapter->replenish_task_cycles++;
346 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
347 i++) {
348 if (adapter->rx_pool[i].active)
349 replenish_rx_pool(adapter, &adapter->rx_pool[i]);
350 }
351}
352
353static void free_rx_pool(struct ibmvnic_adapter *adapter,
354 struct ibmvnic_rx_pool *pool)
355{
356 int i;
357
358 kfree(pool->free_map);
359 pool->free_map = NULL;
360
361 if (!pool->rx_buff)
362 return;
363
364 for (i = 0; i < pool->size; i++) {
365 if (pool->rx_buff[i].skb) {
366 dev_kfree_skb_any(pool->rx_buff[i].skb);
367 pool->rx_buff[i].skb = NULL;
368 }
369 }
370 kfree(pool->rx_buff);
371 pool->rx_buff = NULL;
372}
373
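/* ndo_open: renegotiate capabilities and log in with the VNIC server if
 * needed, set up napi, rx/tx buffer pools and the bounce buffer, replenish
 * the rx pools, enable sub-CRQ interrupts and request logical link up.
 */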
374static int ibmvnic_open(struct net_device *netdev)
375{
376 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(30000);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_pool *tx_pool;
	union ibmvnic_crq crq;
	int rxadd_subcrqs;
	u64 *size_array;
	int tx_subcrqs;
	int rc = 0;
	int i, j;

	do {
388 if (adapter->renegotiate) {
389 adapter->renegotiate = false;
390 release_sub_crqs_no_irqs(adapter);
391
392 reinit_completion(&adapter->init_done);
393 send_cap_queries(adapter);
394 if (!wait_for_completion_timeout(&adapter->init_done,
395 timeout)) {
396 dev_err(dev, "Capabilities query timeout\n");
397 return -1;
398 }
399 }
400
401 reinit_completion(&adapter->init_done);
402 send_login(adapter);
403 if (!wait_for_completion_timeout(&adapter->init_done,
404 timeout)) {
405 dev_err(dev, "Login timeout\n");
406 return -1;
407 }
408 } while (adapter->renegotiate);
409
410 rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
411 if (rc) {
412 dev_err(dev, "failed to set the number of tx queues\n");
413 return -1;
414 }
415
416 rc = init_sub_crq_irqs(adapter);
417 if (rc) {
418 dev_err(dev, "failed to initialize sub crq irqs\n");
419 return -1;
420 }
421
	rxadd_subcrqs =
423 be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
424 tx_subcrqs =
425 be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
426 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
427 be32_to_cpu(adapter->login_rsp_buf->
428 off_rxadd_buff_size));
429 adapter->map_id = 1;
430 adapter->napi = kcalloc(adapter->req_rx_queues,
431 sizeof(struct napi_struct), GFP_KERNEL);
432 if (!adapter->napi)
433 goto alloc_napi_failed;
434 for (i = 0; i < adapter->req_rx_queues; i++) {
435 netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
436 NAPI_POLL_WEIGHT);
437 napi_enable(&adapter->napi[i]);
438 }
439 adapter->rx_pool =
440 kcalloc(rxadd_subcrqs, sizeof(struct ibmvnic_rx_pool), GFP_KERNEL);
441
442 if (!adapter->rx_pool)
443 goto rx_pool_arr_alloc_failed;
444 send_map_query(adapter);
445 for (i = 0; i < rxadd_subcrqs; i++) {
446 init_rx_pool(adapter, &adapter->rx_pool[i],
			     adapter->req_rx_add_entries_per_subcrq, i,
			     be64_to_cpu(size_array[i]), 1);
449 if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) {
450 dev_err(dev, "Couldn't alloc rx pool\n");
451 goto rx_pool_alloc_failed;
452 }
453 }
454 adapter->tx_pool =
455 kcalloc(tx_subcrqs, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
456
457 if (!adapter->tx_pool)
458 goto tx_pool_arr_alloc_failed;
459 for (i = 0; i < tx_subcrqs; i++) {
460 tx_pool = &adapter->tx_pool[i];
461 tx_pool->tx_buff =
			kcalloc(adapter->req_tx_entries_per_subcrq,
				sizeof(struct ibmvnic_tx_buff), GFP_KERNEL);
464 if (!tx_pool->tx_buff)
465 goto tx_pool_alloc_failed;
466
467 if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
					 adapter->req_tx_entries_per_subcrq *
					 adapter->req_mtu))
470 goto tx_ltb_alloc_failed;
471
472 tx_pool->free_map =
			kcalloc(adapter->req_tx_entries_per_subcrq,
				sizeof(int), GFP_KERNEL);
475 if (!tx_pool->free_map)
476 goto tx_fm_alloc_failed;
477
		for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
			tx_pool->free_map[j] = j;
480
481 tx_pool->consumer_index = 0;
482 tx_pool->producer_index = 0;
483 }
484 adapter->bounce_buffer_size =
485 (netdev->mtu + ETH_HLEN - 1) / PAGE_SIZE + 1;
486 adapter->bounce_buffer = kmalloc(adapter->bounce_buffer_size,
487 GFP_KERNEL);
488 if (!adapter->bounce_buffer)
489 goto bounce_alloc_failed;
490
491 adapter->bounce_buffer_dma = dma_map_single(dev, adapter->bounce_buffer,
492 adapter->bounce_buffer_size,
493 DMA_TO_DEVICE);
494 if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
495 dev_err(dev, "Couldn't map tx bounce buffer\n");
496 goto bounce_map_failed;
497 }
498 replenish_pools(adapter);
499
500 /* We're ready to receive frames, enable the sub-crq interrupts and
501 * set the logical link state to up
502 */
503 for (i = 0; i < adapter->req_rx_queues; i++)
504 enable_scrq_irq(adapter, adapter->rx_scrq[i]);
505
506 for (i = 0; i < adapter->req_tx_queues; i++)
507 enable_scrq_irq(adapter, adapter->tx_scrq[i]);
508
509 memset(&crq, 0, sizeof(crq));
510 crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
511 crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
512 crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
513 ibmvnic_send_crq(adapter, &crq);
514
	netif_tx_start_all_queues(netdev);

	return 0;
518
519bounce_map_failed:
520 kfree(adapter->bounce_buffer);
521bounce_alloc_failed:
522 i = tx_subcrqs - 1;
523 kfree(adapter->tx_pool[i].free_map);
524tx_fm_alloc_failed:
525 free_long_term_buff(adapter, &adapter->tx_pool[i].long_term_buff);
526tx_ltb_alloc_failed:
527 kfree(adapter->tx_pool[i].tx_buff);
528tx_pool_alloc_failed:
529 for (j = 0; j < i; j++) {
530 kfree(adapter->tx_pool[j].tx_buff);
531 free_long_term_buff(adapter,
532 &adapter->tx_pool[j].long_term_buff);
533 kfree(adapter->tx_pool[j].free_map);
534 }
535 kfree(adapter->tx_pool);
536 adapter->tx_pool = NULL;
537tx_pool_arr_alloc_failed:
538 i = rxadd_subcrqs;
539rx_pool_alloc_failed:
540 for (j = 0; j < i; j++) {
541 free_rx_pool(adapter, &adapter->rx_pool[j]);
542 free_long_term_buff(adapter,
543 &adapter->rx_pool[j].long_term_buff);
544 }
545 kfree(adapter->rx_pool);
546 adapter->rx_pool = NULL;
547rx_pool_arr_alloc_failed:
548 for (i = 0; i < adapter->req_rx_queues; i++)
		napi_disable(&adapter->napi[i]);
alloc_napi_failed:
	release_sub_crqs(adapter);
	return -ENOMEM;
553}
554
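/* ndo_stop: quiesce napi and the tx queues, release the bounce buffer,
 * request logical link down and free the rx/tx buffer pools.
 */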
555static int ibmvnic_close(struct net_device *netdev)
556{
557 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
558 struct device *dev = &adapter->vdev->dev;
559 union ibmvnic_crq crq;
560 int i;
561
562 adapter->closing = true;
563
564 for (i = 0; i < adapter->req_rx_queues; i++)
565 napi_disable(&adapter->napi[i]);
566
	if (!adapter->failover)
		netif_tx_stop_all_queues(netdev);

	if (adapter->bounce_buffer) {
571 if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
572 dma_unmap_single(&adapter->vdev->dev,
573 adapter->bounce_buffer_dma,
574 adapter->bounce_buffer_size,
575 DMA_BIDIRECTIONAL);
576 adapter->bounce_buffer_dma = DMA_ERROR_CODE;
577 }
578 kfree(adapter->bounce_buffer);
579 adapter->bounce_buffer = NULL;
580 }
581
582 memset(&crq, 0, sizeof(crq));
583 crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
584 crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
585 crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_DN;
586 ibmvnic_send_crq(adapter, &crq);
587
588 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
589 i++) {
590 kfree(adapter->tx_pool[i].tx_buff);
591 free_long_term_buff(adapter,
592 &adapter->tx_pool[i].long_term_buff);
593 kfree(adapter->tx_pool[i].free_map);
594 }
595 kfree(adapter->tx_pool);
596 adapter->tx_pool = NULL;
597
598 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
599 i++) {
600 free_rx_pool(adapter, &adapter->rx_pool[i]);
601 free_long_term_buff(adapter,
602 &adapter->rx_pool[i].long_term_buff);
603 }
604 kfree(adapter->rx_pool);
605 adapter->rx_pool = NULL;
606
607 adapter->closing = false;
608
609 return 0;
610}
611
/**
613 * build_hdr_data - creates L2/L3/L4 header data buffer
614 * @hdr_field - bitfield determining needed headers
615 * @skb - socket buffer
616 * @hdr_len - array of header lengths
617 * @tot_len - total length of data
618 *
619 * Reads hdr_field to determine which headers are needed by firmware.
620 * Builds a buffer containing these headers. Saves individual header
621 * lengths and total buffer length to be used to build descriptors.
622 */
623static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
624 int *hdr_len, u8 *hdr_data)
625{
626 int len = 0;
627 u8 *hdr;
628
629 hdr_len[0] = sizeof(struct ethhdr);
630
631 if (skb->protocol == htons(ETH_P_IP)) {
632 hdr_len[1] = ip_hdr(skb)->ihl * 4;
633 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
634 hdr_len[2] = tcp_hdrlen(skb);
635 else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
636 hdr_len[2] = sizeof(struct udphdr);
637 } else if (skb->protocol == htons(ETH_P_IPV6)) {
638 hdr_len[1] = sizeof(struct ipv6hdr);
639 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
640 hdr_len[2] = tcp_hdrlen(skb);
641 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
642 hdr_len[2] = sizeof(struct udphdr);
643 }
644
645 memset(hdr_data, 0, 120);
646 if ((hdr_field >> 6) & 1) {
647 hdr = skb_mac_header(skb);
648 memcpy(hdr_data, hdr, hdr_len[0]);
649 len += hdr_len[0];
650 }
651
652 if ((hdr_field >> 5) & 1) {
653 hdr = skb_network_header(skb);
654 memcpy(hdr_data + len, hdr, hdr_len[1]);
655 len += hdr_len[1];
656 }
657
658 if ((hdr_field >> 4) & 1) {
659 hdr = skb_transport_header(skb);
660 memcpy(hdr_data + len, hdr, hdr_len[2]);
661 len += hdr_len[2];
662 }
663 return len;
664}
665
666/**
667 * create_hdr_descs - create header and header extension descriptors
668 * @hdr_field - bitfield determining needed headers
669 * @data - buffer containing header data
670 * @len - length of data buffer
671 * @hdr_len - array of individual header lengths
672 * @scrq_arr - descriptor array
673 *
674 * Creates header and, if needed, header extension descriptors and
675 * places them in a descriptor array, scrq_arr
676 */
677
678static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
679 union sub_crq *scrq_arr)
680{
681 union sub_crq hdr_desc;
682 int tmp_len = len;
683 u8 *data, *cur;
684 int tmp;
685
686 while (tmp_len > 0) {
687 cur = hdr_data + len - tmp_len;
688
689 memset(&hdr_desc, 0, sizeof(hdr_desc));
690 if (cur != hdr_data) {
691 data = hdr_desc.hdr_ext.data;
692 tmp = tmp_len > 29 ? 29 : tmp_len;
693 hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
694 hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
695 hdr_desc.hdr_ext.len = tmp;
696 } else {
697 data = hdr_desc.hdr.data;
698 tmp = tmp_len > 24 ? 24 : tmp_len;
699 hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
700 hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
701 hdr_desc.hdr.len = tmp;
702 hdr_desc.hdr.l2_len = (u8)hdr_len[0];
703 hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
704 hdr_desc.hdr.l4_len = (u8)hdr_len[2];
705 hdr_desc.hdr.flag = hdr_field << 1;
706 }
707 memcpy(data, cur, tmp);
708 tmp_len -= tmp;
709 *scrq_arr = hdr_desc;
710 scrq_arr++;
711 }
712}
713
714/**
715 * build_hdr_descs_arr - build a header descriptor array
716 * @skb - socket buffer
717 * @num_entries - number of descriptors to be sent
718 * @subcrq - first TX descriptor
719 * @hdr_field - bit field determining which headers will be sent
720 *
721 * This function will build a TX descriptor array with applicable
722 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
723 */
724
725static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
726 int *num_entries, u8 hdr_field)
727{
728 int hdr_len[3] = {0, 0, 0};
729 int tot_len, len;
730 u8 *hdr_data = txbuff->hdr_data;
731
732 tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
733 txbuff->hdr_data);
734 len = tot_len;
735 len -= 24;
736 if (len > 0)
737 num_entries += len % 29 ? len / 29 + 1 : len / 29;
738 create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
739 txbuff->indir_arr + 1);
740}
741
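/* ndo_start_xmit: copy the skb into the long term mapped tx buffer, build a
 * v1 tx descriptor (plus header descriptors when checksum offload needs
 * them) and hand it to the server via send_subcrq or send_subcrq_indirect.
 */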
static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
743{
744 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
745 int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_sub_crq_queue *tx_scrq;
	struct ibmvnic_tx_pool *tx_pool;
751 unsigned int tx_send_failed = 0;
752 unsigned int tx_map_failed = 0;
753 unsigned int tx_dropped = 0;
754 unsigned int tx_packets = 0;
755 unsigned int tx_bytes = 0;
756 dma_addr_t data_dma_addr;
757 struct netdev_queue *txq;
758 bool used_bounce = false;
759 unsigned long lpar_rc;
760 union sub_crq tx_crq;
761 unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
764 u64 *handle_array;
765 int index = 0;
766 int ret = 0;
767
768 tx_pool = &adapter->tx_pool[queue_num];
	tx_scrq = adapter->tx_scrq[queue_num];
	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
771 handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
772 be32_to_cpu(adapter->login_rsp_buf->
773 off_txsubm_subcrqs));
774 if (adapter->migrated) {
775 tx_send_failed++;
776 tx_dropped++;
777 ret = NETDEV_TX_BUSY;
778 goto out;
779 }
780
781 index = tx_pool->free_map[tx_pool->consumer_index];
782 offset = index * adapter->req_mtu;
783 dst = tx_pool->long_term_buff.buff + offset;
784 memset(dst, 0, adapter->req_mtu);
785 skb_copy_from_linear_data(skb, dst, skb->len);
786 data_dma_addr = tx_pool->long_term_buff.addr + offset;
787
788 tx_pool->consumer_index =
789 (tx_pool->consumer_index + 1) %
		adapter->req_tx_entries_per_subcrq;

	tx_buff = &tx_pool->tx_buff[index];
793 tx_buff->skb = skb;
794 tx_buff->data_dma[0] = data_dma_addr;
795 tx_buff->data_len[0] = skb->len;
796 tx_buff->index = index;
797 tx_buff->pool_index = queue_num;
798 tx_buff->last_frag = true;
799 tx_buff->used_bounce = used_bounce;
800
801 memset(&tx_crq, 0, sizeof(tx_crq));
802 tx_crq.v1.first = IBMVNIC_CRQ_CMD;
803 tx_crq.v1.type = IBMVNIC_TX_DESC;
804 tx_crq.v1.n_crq_elem = 1;
805 tx_crq.v1.n_sge = 1;
806 tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
807 tx_crq.v1.correlator = cpu_to_be32(index);
808 tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
809 tx_crq.v1.sge_len = cpu_to_be32(skb->len);
810 tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
811
812 if (adapter->vlan_header_insertion) {
813 tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
814 tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
815 }
816
817 if (skb->protocol == htons(ETH_P_IP)) {
818 if (ip_hdr(skb)->version == 4)
819 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
820 else if (ip_hdr(skb)->version == 6)
821 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
822
823 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
824 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
825 else if (ip_hdr(skb)->protocol != IPPROTO_TCP)
826 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
827 }
828
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
832 }
833 /* determine if l2/3/4 headers are sent to firmware */
834 if ((*hdrs >> 7) & 1 &&
835 (skb->protocol == htons(ETH_P_IP) ||
836 skb->protocol == htons(ETH_P_IPV6))) {
837 build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
838 tx_crq.v1.n_crq_elem = num_entries;
839 tx_buff->indir_arr[0] = tx_crq;
840 tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
841 sizeof(tx_buff->indir_arr),
842 DMA_TO_DEVICE);
843 if (dma_mapping_error(dev, tx_buff->indir_dma)) {
844 if (!firmware_has_feature(FW_FEATURE_CMO))
845 dev_err(dev, "tx: unable to map descriptor array\n");
846 tx_map_failed++;
847 tx_dropped++;
848 ret = NETDEV_TX_BUSY;
849 goto out;
850 }
		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
					       (u64)tx_buff->indir_dma,
853 (u64)num_entries);
854 } else {
		lpar_rc = send_subcrq(adapter, handle_array[queue_num],
				      &tx_crq);
	}
	if (lpar_rc != H_SUCCESS) {
859 dev_err(dev, "tx failed with code %ld\n", lpar_rc);
860
861 if (tx_pool->consumer_index == 0)
862 tx_pool->consumer_index =
				adapter->req_tx_entries_per_subcrq - 1;
		else
865 tx_pool->consumer_index--;
866
867 tx_send_failed++;
868 tx_dropped++;
869 ret = NETDEV_TX_BUSY;
870 goto out;
	}

	atomic_inc(&tx_scrq->used);
874
875 if (atomic_read(&tx_scrq->used) >= adapter->req_tx_entries_per_subcrq) {
876 netdev_info(netdev, "Stopping queue %d\n", queue_num);
877 netif_stop_subqueue(netdev, queue_num);
878 }
879
	tx_packets++;
881 tx_bytes += skb->len;
882 txq->trans_start = jiffies;
883 ret = NETDEV_TX_OK;
884
885out:
886 netdev->stats.tx_dropped += tx_dropped;
887 netdev->stats.tx_bytes += tx_bytes;
888 netdev->stats.tx_packets += tx_packets;
889 adapter->tx_send_failed += tx_send_failed;
890 adapter->tx_map_failed += tx_map_failed;
891
892 return ret;
893}
894
895static void ibmvnic_set_multi(struct net_device *netdev)
896{
897 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
898 struct netdev_hw_addr *ha;
899 union ibmvnic_crq crq;
900
901 memset(&crq, 0, sizeof(crq));
902 crq.request_capability.first = IBMVNIC_CRQ_CMD;
903 crq.request_capability.cmd = REQUEST_CAPABILITY;
904
905 if (netdev->flags & IFF_PROMISC) {
906 if (!adapter->promisc_supported)
907 return;
908 } else {
909 if (netdev->flags & IFF_ALLMULTI) {
910 /* Accept all multicast */
911 memset(&crq, 0, sizeof(crq));
912 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
913 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
914 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
915 ibmvnic_send_crq(adapter, &crq);
916 } else if (netdev_mc_empty(netdev)) {
917 /* Reject all multicast */
918 memset(&crq, 0, sizeof(crq));
919 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
920 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
921 crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
922 ibmvnic_send_crq(adapter, &crq);
923 } else {
924 /* Accept one or more multicast(s) */
925 netdev_for_each_mc_addr(ha, netdev) {
926 memset(&crq, 0, sizeof(crq));
927 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
928 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
929 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
930 ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
931 ha->addr);
932 ibmvnic_send_crq(adapter, &crq);
933 }
934 }
935 }
936}
937
938static int ibmvnic_set_mac(struct net_device *netdev, void *p)
939{
940 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
941 struct sockaddr *addr = p;
942 union ibmvnic_crq crq;
943
944 if (!is_valid_ether_addr(addr->sa_data))
945 return -EADDRNOTAVAIL;
946
947 memset(&crq, 0, sizeof(crq));
948 crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
949 crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
950 ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
951 ibmvnic_send_crq(adapter, &crq);
952 /* netdev->dev_addr is changed in handle_change_mac_rsp function */
953 return 0;
954}
955
static void ibmvnic_tx_timeout(struct net_device *dev)
957{
958 struct ibmvnic_adapter *adapter = netdev_priv(dev);
959 int rc;
960
961 /* Adapter timed out, resetting it */
962 release_sub_crqs(adapter);
963 rc = ibmvnic_reset_crq(adapter);
964 if (rc)
965 dev_err(&adapter->vdev->dev, "Adapter timeout, reset failed\n");
966 else
967 ibmvnic_send_crq_init(adapter);
968}
969
970static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
971 struct ibmvnic_rx_buff *rx_buff)
972{
973 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
974
975 rx_buff->skb = NULL;
976
977 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
978 pool->next_alloc = (pool->next_alloc + 1) % pool->size;
979
980 atomic_dec(&pool->available);
981}
982
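/* NAPI poll: drain rx completions from this queue's sub-CRQ, copy the
 * received frames into their skbs and pass them up the stack, then
 * replenish the rx pool and re-enable the interrupt when under budget.
 */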
983static int ibmvnic_poll(struct napi_struct *napi, int budget)
984{
985 struct net_device *netdev = napi->dev;
986 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
987 int scrq_num = (int)(napi - adapter->napi);
988 int frames_processed = 0;
989restart_poll:
990 while (frames_processed < budget) {
991 struct sk_buff *skb;
992 struct ibmvnic_rx_buff *rx_buff;
993 union sub_crq *next;
994 u32 length;
995 u16 offset;
996 u8 flags = 0;
997
998 if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
999 break;
1000 next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
1001 rx_buff =
1002 (struct ibmvnic_rx_buff *)be64_to_cpu(next->
1003 rx_comp.correlator);
1004 /* do error checking */
1005 if (next->rx_comp.rc) {
1006 netdev_err(netdev, "rx error %x\n", next->rx_comp.rc);
1007 /* free the entry */
1008 next->rx_comp.first = 0;
1009 remove_buff_from_pool(adapter, rx_buff);
1010 break;
1011 }
1012
1013 length = be32_to_cpu(next->rx_comp.len);
1014 offset = be16_to_cpu(next->rx_comp.off_frame_data);
1015 flags = next->rx_comp.flags;
1016 skb = rx_buff->skb;
1017 skb_copy_to_linear_data(skb, rx_buff->data + offset,
1018 length);
1019 skb->vlan_tci = be16_to_cpu(next->rx_comp.vlan_tci);
1020 /* free the entry */
1021 next->rx_comp.first = 0;
1022 remove_buff_from_pool(adapter, rx_buff);
1023
1024 skb_put(skb, length);
1025 skb->protocol = eth_type_trans(skb, netdev);
1026
1027 if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
1028 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
1029 skb->ip_summed = CHECKSUM_UNNECESSARY;
1030 }
1031
1032 length = skb->len;
1033 napi_gro_receive(napi, skb); /* send it up */
1034 netdev->stats.rx_packets++;
1035 netdev->stats.rx_bytes += length;
1036 frames_processed++;
1037 }
	replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);

	if (frames_processed < budget) {
1041 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
		napi_complete_done(napi, frames_processed);
		if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
1044 napi_reschedule(napi)) {
1045 disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
1046 goto restart_poll;
1047 }
1048 }
1049 return frames_processed;
1050}
1051
1052#ifdef CONFIG_NET_POLL_CONTROLLER
1053static void ibmvnic_netpoll_controller(struct net_device *dev)
1054{
1055 struct ibmvnic_adapter *adapter = netdev_priv(dev);
1056 int i;
1057
1058 replenish_pools(netdev_priv(dev));
1059 for (i = 0; i < adapter->req_rx_queues; i++)
1060 ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
1061 adapter->rx_scrq[i]);
1062}
1063#endif
1064
1065static const struct net_device_ops ibmvnic_netdev_ops = {
1066 .ndo_open = ibmvnic_open,
1067 .ndo_stop = ibmvnic_close,
1068 .ndo_start_xmit = ibmvnic_xmit,
1069 .ndo_set_rx_mode = ibmvnic_set_multi,
1070 .ndo_set_mac_address = ibmvnic_set_mac,
1071 .ndo_validate_addr = eth_validate_addr,
	.ndo_tx_timeout = ibmvnic_tx_timeout,
1073#ifdef CONFIG_NET_POLL_CONTROLLER
1074 .ndo_poll_controller = ibmvnic_netpoll_controller,
1075#endif
1076};
1077
1078/* ethtool functions */
1079
static int ibmvnic_get_link_ksettings(struct net_device *netdev,
				      struct ethtool_link_ksettings *cmd)
{
	u32 supported, advertising;
1084
1085 supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
		     SUPPORTED_FIBRE);
	advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
		       ADVERTISED_FIBRE);
	cmd->base.speed = SPEED_1000;
1090 cmd->base.duplex = DUPLEX_FULL;
1091 cmd->base.port = PORT_FIBRE;
1092 cmd->base.phy_address = 0;
1093 cmd->base.autoneg = AUTONEG_ENABLE;
1094
1095 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1096 supported);
1097 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1098 advertising);
1099
	return 0;
1101}
1102
1103static void ibmvnic_get_drvinfo(struct net_device *dev,
1104 struct ethtool_drvinfo *info)
1105{
1106 strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
1107 strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
1108}
1109
1110static u32 ibmvnic_get_msglevel(struct net_device *netdev)
1111{
1112 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1113
1114 return adapter->msg_enable;
1115}
1116
1117static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
1118{
1119 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1120
1121 adapter->msg_enable = data;
1122}
1123
1124static u32 ibmvnic_get_link(struct net_device *netdev)
1125{
1126 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1127
1128 /* Don't need to send a query because we request a logical link up at
1129 * init and then we wait for link state indications
1130 */
1131 return adapter->logical_link_state;
1132}
1133
1134static void ibmvnic_get_ringparam(struct net_device *netdev,
1135 struct ethtool_ringparam *ring)
1136{
1137 ring->rx_max_pending = 0;
1138 ring->tx_max_pending = 0;
1139 ring->rx_mini_max_pending = 0;
1140 ring->rx_jumbo_max_pending = 0;
1141 ring->rx_pending = 0;
1142 ring->tx_pending = 0;
1143 ring->rx_mini_pending = 0;
1144 ring->rx_jumbo_pending = 0;
1145}
1146
1147static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1148{
1149 int i;
1150
1151 if (stringset != ETH_SS_STATS)
1152 return;
1153
1154 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
1155 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
1156}
1157
1158static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
1159{
1160 switch (sset) {
1161 case ETH_SS_STATS:
1162 return ARRAY_SIZE(ibmvnic_stats);
1163 default:
1164 return -EOPNOTSUPP;
1165 }
1166}
1167
1168static void ibmvnic_get_ethtool_stats(struct net_device *dev,
1169 struct ethtool_stats *stats, u64 *data)
1170{
1171 struct ibmvnic_adapter *adapter = netdev_priv(dev);
1172 union ibmvnic_crq crq;
1173 int i;
1174
1175 memset(&crq, 0, sizeof(crq));
1176 crq.request_statistics.first = IBMVNIC_CRQ_CMD;
1177 crq.request_statistics.cmd = REQUEST_STATISTICS;
1178 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
1179 crq.request_statistics.len =
		cpu_to_be32(sizeof(struct ibmvnic_statistics));

	/* Wait for data to be written */
1183 init_completion(&adapter->stats_done);
	ibmvnic_send_crq(adapter, &crq);
	wait_for_completion(&adapter->stats_done);
1186
1187 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
1188 data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
1189}
1190
1191static const struct ethtool_ops ibmvnic_ethtool_ops = {
	.get_drvinfo = ibmvnic_get_drvinfo,
1193 .get_msglevel = ibmvnic_get_msglevel,
1194 .set_msglevel = ibmvnic_set_msglevel,
1195 .get_link = ibmvnic_get_link,
1196 .get_ringparam = ibmvnic_get_ringparam,
1197 .get_strings = ibmvnic_get_strings,
1198 .get_sset_count = ibmvnic_get_sset_count,
1199 .get_ethtool_stats = ibmvnic_get_ethtool_stats,
	.get_link_ksettings = ibmvnic_get_link_ksettings,
};
1202
1203/* Routines for managing CRQs/sCRQs */
1204
1205static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
1206 struct ibmvnic_sub_crq_queue *scrq)
1207{
1208 struct device *dev = &adapter->vdev->dev;
1209 long rc;
1210
1211 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
1212
1213 /* Close the sub-crqs */
1214 do {
1215 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
1216 adapter->vdev->unit_address,
1217 scrq->crq_num);
1218 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
1219
1220 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
1221 DMA_BIDIRECTIONAL);
1222 free_pages((unsigned long)scrq->msgs, 2);
1223 kfree(scrq);
1224}
1225
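/* Allocate a four page sub-CRQ message area, DMA map it and register it
 * with the hypervisor via h_reg_sub_crq.
 */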
1226static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
1227 *adapter)
1228{
1229 struct device *dev = &adapter->vdev->dev;
1230 struct ibmvnic_sub_crq_queue *scrq;
1231 int rc;
1232
1233 scrq = kmalloc(sizeof(*scrq), GFP_ATOMIC);
1234 if (!scrq)
1235 return NULL;
1236
	scrq->msgs = (union sub_crq *)__get_free_pages(GFP_ATOMIC, 2);
	memset(scrq->msgs, 0, 4 * PAGE_SIZE);
1239 if (!scrq->msgs) {
1240 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
1241 goto zero_page_failed;
1242 }
1243
1244 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
1245 DMA_BIDIRECTIONAL);
1246 if (dma_mapping_error(dev, scrq->msg_token)) {
1247 dev_warn(dev, "Couldn't map crq queue messages page\n");
1248 goto map_failed;
1249 }
1250
1251 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
1252 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
1253
1254 if (rc == H_RESOURCE)
1255 rc = ibmvnic_reset_crq(adapter);
1256
1257 if (rc == H_CLOSED) {
1258 dev_warn(dev, "Partner adapter not ready, waiting.\n");
1259 } else if (rc) {
1260 dev_warn(dev, "Error %d registering sub-crq\n", rc);
1261 goto reg_failed;
1262 }
1263
	scrq->adapter = adapter;
1265 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
1266 scrq->cur = 0;
	atomic_set(&scrq->used, 0);
	scrq->rx_skb_top = NULL;
1269 spin_lock_init(&scrq->lock);
1270
1271 netdev_dbg(adapter->netdev,
1272 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
1273 scrq->crq_num, scrq->hw_irq, scrq->irq);
1274
1275 return scrq;
1276
reg_failed:
1278 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
1279 DMA_BIDIRECTIONAL);
1280map_failed:
1281 free_pages((unsigned long)scrq->msgs, 2);
1282zero_page_failed:
1283 kfree(scrq);
1284
1285 return NULL;
1286}
1287
1288static void release_sub_crqs(struct ibmvnic_adapter *adapter)
1289{
1290 int i;
1291
1292 if (adapter->tx_scrq) {
1293 for (i = 0; i < adapter->req_tx_queues; i++)
1294 if (adapter->tx_scrq[i]) {
1295 free_irq(adapter->tx_scrq[i]->irq,
1296 adapter->tx_scrq[i]);
				irq_dispose_mapping(adapter->tx_scrq[i]->irq);
				release_sub_crq_queue(adapter,
1299 adapter->tx_scrq[i]);
1300 }
1301 adapter->tx_scrq = NULL;
1302 }
1303
1304 if (adapter->rx_scrq) {
1305 for (i = 0; i < adapter->req_rx_queues; i++)
1306 if (adapter->rx_scrq[i]) {
1307 free_irq(adapter->rx_scrq[i]->irq,
1308 adapter->rx_scrq[i]);
				irq_dispose_mapping(adapter->rx_scrq[i]->irq);
				release_sub_crq_queue(adapter,
1311 adapter->rx_scrq[i]);
1312 }
1313 adapter->rx_scrq = NULL;
1314 }
}
1316
static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter)
1318{
1319 int i;
1320
1321 if (adapter->tx_scrq) {
1322 for (i = 0; i < adapter->req_tx_queues; i++)
1323 if (adapter->tx_scrq[i])
1324 release_sub_crq_queue(adapter,
1325 adapter->tx_scrq[i]);
1326 adapter->tx_scrq = NULL;
1327 }
1328
1329 if (adapter->rx_scrq) {
1330 for (i = 0; i < adapter->req_rx_queues; i++)
1331 if (adapter->rx_scrq[i])
1332 release_sub_crq_queue(adapter,
1333 adapter->rx_scrq[i]);
1334 adapter->rx_scrq = NULL;
1335 }
}
1337
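/* Interrupts for a sub-CRQ are toggled through the H_VIOCTL hcall using the
 * queue's hardware interrupt source.
 */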
static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
1339 struct ibmvnic_sub_crq_queue *scrq)
1340{
1341 struct device *dev = &adapter->vdev->dev;
1342 unsigned long rc;
1343
1344 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
1345 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
1346 if (rc)
1347 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
1348 scrq->hw_irq, rc);
1349 return rc;
1350}
1351
1352static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
1353 struct ibmvnic_sub_crq_queue *scrq)
1354{
1355 struct device *dev = &adapter->vdev->dev;
1356 unsigned long rc;
1357
1358 if (scrq->hw_irq > 0x100000000ULL) {
1359 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
1360 return 1;
1361 }
1362
1363 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
1364 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
1365 if (rc)
1366 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
1367 scrq->hw_irq, rc);
1368 return rc;
1369}
1370
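/* Process tx completions for a sub-CRQ: unmap indirect descriptors, free
 * completed skbs, recycle tx_buff slots and wake the subqueue once it has
 * drained to half of its entries.
 */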
1371static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
1372 struct ibmvnic_sub_crq_queue *scrq)
1373{
1374 struct device *dev = &adapter->vdev->dev;
1375 struct ibmvnic_tx_buff *txbuff;
1376 union sub_crq *next;
1377 int index;
1378 int i, j;
	u8 first;

restart_loop:
1382 while (pending_scrq(adapter, scrq)) {
1383 unsigned int pool = scrq->pool_index;
1384
1385 next = ibmvnic_next_scrq(adapter, scrq);
1386 for (i = 0; i < next->tx_comp.num_comps; i++) {
1387 if (next->tx_comp.rcs[i]) {
1388 dev_err(dev, "tx error %x\n",
1389 next->tx_comp.rcs[i]);
1390 continue;
1391 }
1392 index = be32_to_cpu(next->tx_comp.correlators[i]);
1393 txbuff = &adapter->tx_pool[pool].tx_buff[index];
1394
1395 for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
1396 if (!txbuff->data_dma[j])
1397 continue;
1398
1399 txbuff->data_dma[j] = 0;
1400 txbuff->used_bounce = false;
1401 }
			/* if sub_crq was sent indirectly */
1403 first = txbuff->indir_arr[0].generic.first;
1404 if (first == IBMVNIC_CRQ_CMD) {
1405 dma_unmap_single(dev, txbuff->indir_dma,
1406 sizeof(txbuff->indir_arr),
1407 DMA_TO_DEVICE);
			}

			if (txbuff->last_frag) {
1411 atomic_dec(&scrq->used);
1412
1413 if (atomic_read(&scrq->used) <=
1414 (adapter->req_tx_entries_per_subcrq / 2) &&
1415 netif_subqueue_stopped(adapter->netdev,
1416 txbuff->skb)) {
1417 netif_wake_subqueue(adapter->netdev,
1418 scrq->pool_index);
1419 netdev_dbg(adapter->netdev,
1420 "Started queue %d\n",
1421 scrq->pool_index);
1422 }
1423
				dev_kfree_skb_any(txbuff->skb);
			}

			adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
1428 producer_index] = index;
1429 adapter->tx_pool[pool].producer_index =
1430 (adapter->tx_pool[pool].producer_index + 1) %
				adapter->req_tx_entries_per_subcrq;
		}
1433 /* remove tx_comp scrq*/
1434 next->tx_comp.first = 0;
1435 }
1436
1437 enable_scrq_irq(adapter, scrq);
1438
1439 if (pending_scrq(adapter, scrq)) {
1440 disable_scrq_irq(adapter, scrq);
1441 goto restart_loop;
1442 }
1443
1444 return 0;
1445}
1446
1447static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
1448{
1449 struct ibmvnic_sub_crq_queue *scrq = instance;
1450 struct ibmvnic_adapter *adapter = scrq->adapter;
1451
1452 disable_scrq_irq(adapter, scrq);
1453 ibmvnic_complete_tx(adapter, scrq);
1454
1455 return IRQ_HANDLED;
1456}
1457
1458static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
1459{
1460 struct ibmvnic_sub_crq_queue *scrq = instance;
1461 struct ibmvnic_adapter *adapter = scrq->adapter;
1462
1463 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
1464 disable_scrq_irq(adapter, scrq);
1465 __napi_schedule(&adapter->napi[scrq->scrq_num]);
1466 }
1467
1468 return IRQ_HANDLED;
1469}
1470
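/* Create an irq mapping for every tx and rx sub-CRQ and install the
 * ibmvnic_interrupt_tx/rx handlers, unwinding on failure.
 */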
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
1472{
1473 struct device *dev = &adapter->vdev->dev;
1474 struct ibmvnic_sub_crq_queue *scrq;
1475 int i = 0, j = 0;
1476 int rc = 0;
1477
1478 for (i = 0; i < adapter->req_tx_queues; i++) {
1479 scrq = adapter->tx_scrq[i];
1480 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
1481
		if (!scrq->irq) {
			rc = -EINVAL;
1484 dev_err(dev, "Error mapping irq\n");
1485 goto req_tx_irq_failed;
1486 }
1487
1488 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
1489 0, "ibmvnic_tx", scrq);
1490
1491 if (rc) {
1492 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
1493 scrq->irq, rc);
1494 irq_dispose_mapping(scrq->irq);
1495 goto req_rx_irq_failed;
1496 }
1497 }
1498
1499 for (i = 0; i < adapter->req_rx_queues; i++) {
1500 scrq = adapter->rx_scrq[i];
1501 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
		if (!scrq->irq) {
			rc = -EINVAL;
1504 dev_err(dev, "Error mapping irq\n");
1505 goto req_rx_irq_failed;
1506 }
1507 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
1508 0, "ibmvnic_rx", scrq);
1509 if (rc) {
1510 dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
1511 scrq->irq, rc);
1512 irq_dispose_mapping(scrq->irq);
1513 goto req_rx_irq_failed;
1514 }
1515 }
1516 return rc;
1517
1518req_rx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
1521 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
	}
	i = adapter->req_tx_queues;
1524req_tx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
1527 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
	}
	release_sub_crqs_no_irqs(adapter);
1530 return rc;
1531}
1532
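/* Size the resource request from the queried min/max capabilities, allocate
 * the sub-CRQs, reduce the request if not all queues could be registered and
 * send the REQUEST_CAPABILITY CRQs to the server.
 */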
static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
1534{
1535 struct device *dev = &adapter->vdev->dev;
1536 struct ibmvnic_sub_crq_queue **allqueues;
1537 int registered_queues = 0;
1538 union ibmvnic_crq crq;
1539 int total_queues;
1540 int more = 0;
	int i;

	if (!retry) {
1544 /* Sub-CRQ entries are 32 byte long */
1545 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
1546
1547 if (adapter->min_tx_entries_per_subcrq > entries_page ||
1548 adapter->min_rx_add_entries_per_subcrq > entries_page) {
1549 dev_err(dev, "Fatal, invalid entries per sub-crq\n");
1550 goto allqueues_failed;
1551 }
1552
1553 /* Get the minimum between the queried max and the entries
1554 * that fit in our PAGE_SIZE
1555 */
1556 adapter->req_tx_entries_per_subcrq =
1557 adapter->max_tx_entries_per_subcrq > entries_page ?
1558 entries_page : adapter->max_tx_entries_per_subcrq;
1559 adapter->req_rx_add_entries_per_subcrq =
1560 adapter->max_rx_add_entries_per_subcrq > entries_page ?
1561 entries_page : adapter->max_rx_add_entries_per_subcrq;
1562
		adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues;
		adapter->req_rx_queues = adapter->opt_rx_comp_queues;
		adapter->req_rx_add_queues = adapter->max_rx_add_queues;

		adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
	}
1569
1570 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
1571
1572 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_ATOMIC);
1573 if (!allqueues)
1574 goto allqueues_failed;
1575
1576 for (i = 0; i < total_queues; i++) {
1577 allqueues[i] = init_sub_crq_queue(adapter);
1578 if (!allqueues[i]) {
1579 dev_warn(dev, "Couldn't allocate all sub-crqs\n");
1580 break;
1581 }
1582 registered_queues++;
1583 }
1584
1585 /* Make sure we were able to register the minimum number of queues */
1586 if (registered_queues <
1587 adapter->min_tx_queues + adapter->min_rx_queues) {
1588 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
1589 goto tx_failed;
1590 }
1591
1592 /* Distribute the failed allocated queues*/
1593 for (i = 0; i < total_queues - registered_queues + more ; i++) {
1594 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
1595 switch (i % 3) {
1596 case 0:
1597 if (adapter->req_rx_queues > adapter->min_rx_queues)
1598 adapter->req_rx_queues--;
1599 else
1600 more++;
1601 break;
1602 case 1:
1603 if (adapter->req_tx_queues > adapter->min_tx_queues)
1604 adapter->req_tx_queues--;
1605 else
1606 more++;
1607 break;
1608 }
1609 }
1610
1611 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
1612 sizeof(*adapter->tx_scrq), GFP_ATOMIC);
1613 if (!adapter->tx_scrq)
1614 goto tx_failed;
1615
1616 for (i = 0; i < adapter->req_tx_queues; i++) {
1617 adapter->tx_scrq[i] = allqueues[i];
1618 adapter->tx_scrq[i]->pool_index = i;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001619 }
1620
1621 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
1622 sizeof(*adapter->rx_scrq), GFP_ATOMIC);
1623 if (!adapter->rx_scrq)
1624 goto rx_failed;
1625
1626 for (i = 0; i < adapter->req_rx_queues; i++) {
1627 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
1628 adapter->rx_scrq[i]->scrq_num = i;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001629 }
1630
1631 memset(&crq, 0, sizeof(crq));
1632 crq.request_capability.first = IBMVNIC_CRQ_CMD;
1633 crq.request_capability.cmd = REQUEST_CAPABILITY;
1634
1635 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
Thomas Falconde89e852016-03-01 10:20:09 -06001636 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
Thomas Falcon901e0402017-02-15 12:17:59 -06001637 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001638 ibmvnic_send_crq(adapter, &crq);
1639
1640 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
Thomas Falconde89e852016-03-01 10:20:09 -06001641 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
Thomas Falcon901e0402017-02-15 12:17:59 -06001642 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001643 ibmvnic_send_crq(adapter, &crq);
1644
1645 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
Thomas Falconde89e852016-03-01 10:20:09 -06001646 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
Thomas Falcon901e0402017-02-15 12:17:59 -06001647 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001648 ibmvnic_send_crq(adapter, &crq);
1649
1650 crq.request_capability.capability =
1651 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
1652 crq.request_capability.number =
Thomas Falconde89e852016-03-01 10:20:09 -06001653 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
Thomas Falcon901e0402017-02-15 12:17:59 -06001654 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001655 ibmvnic_send_crq(adapter, &crq);
1656
1657 crq.request_capability.capability =
1658 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
1659 crq.request_capability.number =
Thomas Falconde89e852016-03-01 10:20:09 -06001660 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
Thomas Falcon901e0402017-02-15 12:17:59 -06001661 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001662 ibmvnic_send_crq(adapter, &crq);
1663
1664 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
Thomas Falconde89e852016-03-01 10:20:09 -06001665 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
Thomas Falcon901e0402017-02-15 12:17:59 -06001666 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001667 ibmvnic_send_crq(adapter, &crq);
1668
1669 if (adapter->netdev->flags & IFF_PROMISC) {
1670 if (adapter->promisc_supported) {
1671 crq.request_capability.capability =
1672 cpu_to_be16(PROMISC_REQUESTED);
Thomas Falconde89e852016-03-01 10:20:09 -06001673 crq.request_capability.number = cpu_to_be64(1);
Thomas Falcon901e0402017-02-15 12:17:59 -06001674 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001675 ibmvnic_send_crq(adapter, &crq);
1676 }
1677 } else {
1678 crq.request_capability.capability =
1679 cpu_to_be16(PROMISC_REQUESTED);
Thomas Falconde89e852016-03-01 10:20:09 -06001680 crq.request_capability.number = cpu_to_be64(0);
Thomas Falcon901e0402017-02-15 12:17:59 -06001681 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001682 ibmvnic_send_crq(adapter, &crq);
1683 }
1684
1685 kfree(allqueues);
1686
1687 return;
1688
Thomas Falcon032c5e82015-12-21 11:26:06 -06001689rx_failed:
1690 kfree(adapter->tx_scrq);
1691 adapter->tx_scrq = NULL;
1692tx_failed:
1693 for (i = 0; i < registered_queues; i++)
1694 release_sub_crq_queue(adapter, allqueues[i]);
1695 kfree(allqueues);
1696allqueues_failed:
1697 ibmvnic_remove(adapter->vdev);
1698}
1699
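/* Return 1 if the current sub-CRQ entry has been filled in by the server
 * (or the adapter is closing), 0 otherwise
 */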
1700static int pending_scrq(struct ibmvnic_adapter *adapter,
1701 struct ibmvnic_sub_crq_queue *scrq)
1702{
1703 union sub_crq *entry = &scrq->msgs[scrq->cur];
1704
1705 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP || adapter->closing)
1706 return 1;
1707 else
1708 return 0;
1709}
1710
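/* Consume the next valid entry from a sub-CRQ, advancing the cursor under
 * the queue lock; returns NULL if no completed entry is pending
 */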
1711static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
1712 struct ibmvnic_sub_crq_queue *scrq)
1713{
1714 union sub_crq *entry;
1715 unsigned long flags;
1716
1717 spin_lock_irqsave(&scrq->lock, flags);
1718 entry = &scrq->msgs[scrq->cur];
1719 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
1720 if (++scrq->cur == scrq->size)
1721 scrq->cur = 0;
1722 } else {
1723 entry = NULL;
1724 }
1725 spin_unlock_irqrestore(&scrq->lock, flags);
1726
1727 return entry;
1728}
1729
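/* Return the next valid entry from the main CRQ and advance the cursor,
 * or NULL if nothing is pending
 */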
1730static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
1731{
1732 struct ibmvnic_crq_queue *queue = &adapter->crq;
1733 union ibmvnic_crq *crq;
1734
1735 crq = &queue->msgs[queue->cur];
1736 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
1737 if (++queue->cur == queue->size)
1738 queue->cur = 0;
1739 } else {
1740 crq = NULL;
1741 }
1742
1743 return crq;
1744}
1745
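/* Hand a single sub-CRQ descriptor to the hypervisor via H_SEND_SUB_CRQ.
 * The memory barrier ensures the descriptor is fully written before the
 * hcall is issued.
 */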
1746static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
1747 union sub_crq *sub_crq)
1748{
1749 unsigned int ua = adapter->vdev->unit_address;
1750 struct device *dev = &adapter->vdev->dev;
1751 u64 *u64_crq = (u64 *)sub_crq;
1752 int rc;
1753
1754 netdev_dbg(adapter->netdev,
1755 "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
1756 (unsigned long int)cpu_to_be64(remote_handle),
1757 (unsigned long int)cpu_to_be64(u64_crq[0]),
1758 (unsigned long int)cpu_to_be64(u64_crq[1]),
1759 (unsigned long int)cpu_to_be64(u64_crq[2]),
1760 (unsigned long int)cpu_to_be64(u64_crq[3]));
1761
1762 /* Make sure the hypervisor sees the complete request */
1763 mb();
1764
1765 rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
1766 cpu_to_be64(remote_handle),
1767 cpu_to_be64(u64_crq[0]),
1768 cpu_to_be64(u64_crq[1]),
1769 cpu_to_be64(u64_crq[2]),
1770 cpu_to_be64(u64_crq[3]));
1771
1772 if (rc) {
1773 if (rc == H_CLOSED)
1774 dev_warn(dev, "CRQ Queue closed\n");
1775 dev_err(dev, "Send error (rc=%d)\n", rc);
1776 }
1777
1778 return rc;
1779}
1780
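/* Submit a block of descriptors in one H_SEND_SUB_CRQ_INDIRECT hcall,
 * passing the I/O bus address of the block and the number of entries
 */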
Thomas Falconad7775d2016-04-01 17:20:34 -05001781static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
1782 u64 remote_handle, u64 ioba, u64 num_entries)
1783{
1784 unsigned int ua = adapter->vdev->unit_address;
1785 struct device *dev = &adapter->vdev->dev;
1786 int rc;
1787
1788 /* Make sure the hypervisor sees the complete request */
1789 mb();
1790 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
1791 cpu_to_be64(remote_handle),
1792 ioba, num_entries);
1793
1794 if (rc) {
1795 if (rc == H_CLOSED)
1796 dev_warn(dev, "CRQ Queue closed\n");
1797 dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
1798 }
1799
1800 return rc;
1801}
1802
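/* Send one 16-byte command on the main CRQ using the H_SEND_CRQ hcall */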
Thomas Falcon032c5e82015-12-21 11:26:06 -06001803static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
1804 union ibmvnic_crq *crq)
1805{
1806 unsigned int ua = adapter->vdev->unit_address;
1807 struct device *dev = &adapter->vdev->dev;
1808 u64 *u64_crq = (u64 *)crq;
1809 int rc;
1810
1811 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
1812 (unsigned long int)cpu_to_be64(u64_crq[0]),
1813 (unsigned long int)cpu_to_be64(u64_crq[1]));
1814
1815 /* Make sure the hypervisor sees the complete request */
1816 mb();
1817
1818 rc = plpar_hcall_norets(H_SEND_CRQ, ua,
1819 cpu_to_be64(u64_crq[0]),
1820 cpu_to_be64(u64_crq[1]));
1821
1822 if (rc) {
1823 if (rc == H_CLOSED)
1824 dev_warn(dev, "CRQ Queue closed\n");
1825 dev_warn(dev, "Send error (rc=%d)\n", rc);
1826 }
1827
1828 return rc;
1829}
1830
1831static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
1832{
1833 union ibmvnic_crq crq;
1834
1835 memset(&crq, 0, sizeof(crq));
1836 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
1837 crq.generic.cmd = IBMVNIC_CRQ_INIT;
1838 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
1839
1840 return ibmvnic_send_crq(adapter, &crq);
1841}
1842
1843static int ibmvnic_send_crq_init_complete(struct ibmvnic_adapter *adapter)
1844{
1845 union ibmvnic_crq crq;
1846
1847 memset(&crq, 0, sizeof(crq));
1848 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
1849 crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
1850 netdev_dbg(adapter->netdev, "Sending CRQ init complete\n");
1851
1852 return ibmvnic_send_crq(adapter, &crq);
1853}
1854
1855static int send_version_xchg(struct ibmvnic_adapter *adapter)
1856{
1857 union ibmvnic_crq crq;
1858
1859 memset(&crq, 0, sizeof(crq));
1860 crq.version_exchange.first = IBMVNIC_CRQ_CMD;
1861 crq.version_exchange.cmd = VERSION_EXCHANGE;
1862 crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
1863
1864 return ibmvnic_send_crq(adapter, &crq);
1865}
1866
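/* Build and DMA-map the login buffer, which lists the tx/rx sub-CRQ
 * handles, along with a response buffer; track the command on the
 * in-flight list and send the LOGIN CRQ to the server.
 */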
1867static void send_login(struct ibmvnic_adapter *adapter)
1868{
1869 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
1870 struct ibmvnic_login_buffer *login_buffer;
1871 struct ibmvnic_inflight_cmd *inflight_cmd;
1872 struct device *dev = &adapter->vdev->dev;
1873 dma_addr_t rsp_buffer_token;
1874 dma_addr_t buffer_token;
1875 size_t rsp_buffer_size;
1876 union ibmvnic_crq crq;
1877 unsigned long flags;
1878 size_t buffer_size;
1879 __be64 *tx_list_p;
1880 __be64 *rx_list_p;
1881 int i;
1882
1883 buffer_size =
1884 sizeof(struct ibmvnic_login_buffer) +
1885 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues);
1886
1887 login_buffer = kmalloc(buffer_size, GFP_ATOMIC);
1888 if (!login_buffer)
1889 goto buf_alloc_failed;
1890
1891 buffer_token = dma_map_single(dev, login_buffer, buffer_size,
1892 DMA_TO_DEVICE);
1893 if (dma_mapping_error(dev, buffer_token)) {
1894 dev_err(dev, "Couldn't map login buffer\n");
1895 goto buf_map_failed;
1896 }
1897
John Allen498cd8e2016-04-06 11:49:55 -05001898 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
1899 sizeof(u64) * adapter->req_tx_queues +
1900 sizeof(u64) * adapter->req_rx_queues +
1901 sizeof(u64) * adapter->req_rx_queues +
1902 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001903
1904 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
1905 if (!login_rsp_buffer)
1906 goto buf_rsp_alloc_failed;
1907
1908 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
1909 rsp_buffer_size, DMA_FROM_DEVICE);
1910 if (dma_mapping_error(dev, rsp_buffer_token)) {
1911 dev_err(dev, "Couldn't map login rsp buffer\n");
1912 goto buf_rsp_map_failed;
1913 }
1914 inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
1915 if (!inflight_cmd) {
1916 dev_err(dev, "Couldn't allocate inflight_cmd\n");
1917 goto inflight_alloc_failed;
1918 }
1919 adapter->login_buf = login_buffer;
1920 adapter->login_buf_token = buffer_token;
1921 adapter->login_buf_sz = buffer_size;
1922 adapter->login_rsp_buf = login_rsp_buffer;
1923 adapter->login_rsp_buf_token = rsp_buffer_token;
1924 adapter->login_rsp_buf_sz = rsp_buffer_size;
1925
1926 login_buffer->len = cpu_to_be32(buffer_size);
1927 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
1928 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
1929 login_buffer->off_txcomp_subcrqs =
1930 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
1931 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
1932 login_buffer->off_rxcomp_subcrqs =
1933 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
1934 sizeof(u64) * adapter->req_tx_queues);
1935 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
1936 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
1937
1938 tx_list_p = (__be64 *)((char *)login_buffer +
1939 sizeof(struct ibmvnic_login_buffer));
1940 rx_list_p = (__be64 *)((char *)login_buffer +
1941 sizeof(struct ibmvnic_login_buffer) +
1942 sizeof(u64) * adapter->req_tx_queues);
1943
1944 for (i = 0; i < adapter->req_tx_queues; i++) {
1945 if (adapter->tx_scrq[i]) {
1946 tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
1947 crq_num);
1948 }
1949 }
1950
1951 for (i = 0; i < adapter->req_rx_queues; i++) {
1952 if (adapter->rx_scrq[i]) {
1953 rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
1954 crq_num);
1955 }
1956 }
1957
1958 netdev_dbg(adapter->netdev, "Login Buffer:\n");
1959 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
1960 netdev_dbg(adapter->netdev, "%016lx\n",
1961 ((unsigned long int *)(adapter->login_buf))[i]);
1962 }
1963
1964 memset(&crq, 0, sizeof(crq));
1965 crq.login.first = IBMVNIC_CRQ_CMD;
1966 crq.login.cmd = LOGIN;
1967 crq.login.ioba = cpu_to_be32(buffer_token);
1968 crq.login.len = cpu_to_be32(buffer_size);
1969
1970 memcpy(&inflight_cmd->crq, &crq, sizeof(crq));
1971
1972 spin_lock_irqsave(&adapter->inflight_lock, flags);
1973 list_add_tail(&inflight_cmd->list, &adapter->inflight);
1974 spin_unlock_irqrestore(&adapter->inflight_lock, flags);
1975
1976 ibmvnic_send_crq(adapter, &crq);
1977
1978 return;
1979
1980inflight_alloc_failed:
1981 dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size,
1982 DMA_FROM_DEVICE);
1983buf_rsp_map_failed:
1984 kfree(login_rsp_buffer);
1985buf_rsp_alloc_failed:
1986 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
1987buf_map_failed:
1988 kfree(login_buffer);
1989buf_alloc_failed:
1990 return;
1991}
1992
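/* Ask the server to map a long term DMA buffer, identified by map_id,
 * at the given I/O address and length
 */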
1993static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
1994 u32 len, u8 map_id)
1995{
1996 union ibmvnic_crq crq;
1997
1998 memset(&crq, 0, sizeof(crq));
1999 crq.request_map.first = IBMVNIC_CRQ_CMD;
2000 crq.request_map.cmd = REQUEST_MAP;
2001 crq.request_map.map_id = map_id;
2002 crq.request_map.ioba = cpu_to_be32(addr);
2003 crq.request_map.len = cpu_to_be32(len);
2004 ibmvnic_send_crq(adapter, &crq);
2005}
2006
2007static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
2008{
2009 union ibmvnic_crq crq;
2010
2011 memset(&crq, 0, sizeof(crq));
2012 crq.request_unmap.first = IBMVNIC_CRQ_CMD;
2013 crq.request_unmap.cmd = REQUEST_UNMAP;
2014 crq.request_unmap.map_id = map_id;
2015 ibmvnic_send_crq(adapter, &crq);
2016}
2017
2018static void send_map_query(struct ibmvnic_adapter *adapter)
2019{
2020 union ibmvnic_crq crq;
2021
2022 memset(&crq, 0, sizeof(crq));
2023 crq.query_map.first = IBMVNIC_CRQ_CMD;
2024 crq.query_map.cmd = QUERY_MAP;
2025 ibmvnic_send_crq(adapter, &crq);
2026}
2027
2028/* Send a series of CRQs requesting various capabilities of the VNIC server */
2029static void send_cap_queries(struct ibmvnic_adapter *adapter)
2030{
2031 union ibmvnic_crq crq;
2032
Thomas Falcon901e0402017-02-15 12:17:59 -06002033 atomic_set(&adapter->running_cap_crqs, 0);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002034 memset(&crq, 0, sizeof(crq));
2035 crq.query_capability.first = IBMVNIC_CRQ_CMD;
2036 crq.query_capability.cmd = QUERY_CAPABILITY;
2037
2038 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06002039 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002040 ibmvnic_send_crq(adapter, &crq);
2041
2042 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06002043 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002044 ibmvnic_send_crq(adapter, &crq);
2045
2046 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06002047 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002048 ibmvnic_send_crq(adapter, &crq);
2049
2050 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06002051 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002052 ibmvnic_send_crq(adapter, &crq);
2053
2054 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06002055 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002056 ibmvnic_send_crq(adapter, &crq);
2057
2058 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06002059 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002060 ibmvnic_send_crq(adapter, &crq);
2061
2062 crq.query_capability.capability =
2063 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06002064 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002065 ibmvnic_send_crq(adapter, &crq);
2066
2067 crq.query_capability.capability =
2068 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06002069 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002070 ibmvnic_send_crq(adapter, &crq);
2071
2072 crq.query_capability.capability =
2073 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06002074 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002075 ibmvnic_send_crq(adapter, &crq);
2076
2077 crq.query_capability.capability =
2078 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06002079 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002080 ibmvnic_send_crq(adapter, &crq);
2081
2082 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
Thomas Falcon901e0402017-02-15 12:17:59 -06002083 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002084 ibmvnic_send_crq(adapter, &crq);
2085
2086 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
Thomas Falcon901e0402017-02-15 12:17:59 -06002087 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002088 ibmvnic_send_crq(adapter, &crq);
2089
2090 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
Thomas Falcon901e0402017-02-15 12:17:59 -06002091 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002092 ibmvnic_send_crq(adapter, &crq);
2093
2094 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
Thomas Falcon901e0402017-02-15 12:17:59 -06002095 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002096 ibmvnic_send_crq(adapter, &crq);
2097
2098 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
Thomas Falcon901e0402017-02-15 12:17:59 -06002099 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002100 ibmvnic_send_crq(adapter, &crq);
2101
2102 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
Thomas Falcon901e0402017-02-15 12:17:59 -06002103 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002104 ibmvnic_send_crq(adapter, &crq);
2105
2106 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
Thomas Falcon901e0402017-02-15 12:17:59 -06002107 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002108 ibmvnic_send_crq(adapter, &crq);
2109
2110 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
Thomas Falcon901e0402017-02-15 12:17:59 -06002111 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002112 ibmvnic_send_crq(adapter, &crq);
2113
2114 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06002115 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002116 ibmvnic_send_crq(adapter, &crq);
2117
2118 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06002119 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002120 ibmvnic_send_crq(adapter, &crq);
2121
2122 crq.query_capability.capability =
2123 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
Thomas Falcon901e0402017-02-15 12:17:59 -06002124 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002125 ibmvnic_send_crq(adapter, &crq);
2126
2127 crq.query_capability.capability =
2128 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06002129 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002130 ibmvnic_send_crq(adapter, &crq);
2131
2132 crq.query_capability.capability =
2133 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06002134 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002135 ibmvnic_send_crq(adapter, &crq);
2136
2137 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06002138 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002139 ibmvnic_send_crq(adapter, &crq);
2140}
2141
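/* Process the QUERY_IP_OFFLOAD response: log the offload capabilities
 * reported by firmware, set the netdev feature flags accordingly, and
 * send a CONTROL_IP_OFFLOAD CRQ enabling the supported checksum offloads
 * (large send/receive is left disabled for now).
 */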
2142static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
2143{
2144 struct device *dev = &adapter->vdev->dev;
2145 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
2146 union ibmvnic_crq crq;
2147 int i;
2148
2149 dma_unmap_single(dev, adapter->ip_offload_tok,
2150 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
2151
2152 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
2153 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
2154 netdev_dbg(adapter->netdev, "%016lx\n",
2155 ((unsigned long int *)(buf))[i]);
2156
2157 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
2158 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
2159 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
2160 buf->tcp_ipv4_chksum);
2161 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
2162 buf->tcp_ipv6_chksum);
2163 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
2164 buf->udp_ipv4_chksum);
2165 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
2166 buf->udp_ipv6_chksum);
2167 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
2168 buf->large_tx_ipv4);
2169 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
2170 buf->large_tx_ipv6);
2171 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
2172 buf->large_rx_ipv4);
2173 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
2174 buf->large_rx_ipv6);
2175 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
2176 buf->max_ipv4_header_size);
2177 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
2178 buf->max_ipv6_header_size);
2179 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
2180 buf->max_tcp_header_size);
2181 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
2182 buf->max_udp_header_size);
2183 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
2184 buf->max_large_tx_size);
2185 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
2186 buf->max_large_rx_size);
2187 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
2188 buf->ipv6_extension_header);
2189 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
2190 buf->tcp_pseudosum_req);
2191 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
2192 buf->num_ipv6_ext_headers);
2193 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
2194 buf->off_ipv6_ext_headers);
2195
2196 adapter->ip_offload_ctrl_tok =
2197 dma_map_single(dev, &adapter->ip_offload_ctrl,
2198 sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
2199
2200 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
2201 dev_err(dev, "Couldn't map ip offload control buffer\n");
2202 return;
2203 }
2204
2205 adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
2206 adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
2207 adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
2208 adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
2209 adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
2210
2211 /* large_tx/rx disabled for now, additional features needed */
2212 adapter->ip_offload_ctrl.large_tx_ipv4 = 0;
2213 adapter->ip_offload_ctrl.large_tx_ipv6 = 0;
2214 adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
2215 adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
2216
2217 adapter->netdev->features = NETIF_F_GSO;
2218
2219 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
2220 adapter->netdev->features |= NETIF_F_IP_CSUM;
2221
2222 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
2223 adapter->netdev->features |= NETIF_F_IPV6_CSUM;
2224
Thomas Falcon9be02cd2016-04-01 17:20:35 -05002225 if ((adapter->netdev->features &
2226 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
2227 adapter->netdev->features |= NETIF_F_RXCSUM;
2228
Thomas Falcon032c5e82015-12-21 11:26:06 -06002229 memset(&crq, 0, sizeof(crq));
2230 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
2231 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
2232 crq.control_ip_offload.len =
2233 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
2234 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
2235 ibmvnic_send_crq(adapter, &crq);
2236}
2237
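/* Match a REQUEST_ERROR_INFO response to the error buffer queued by
 * handle_error_indication, dump the detailed error data, then unmap and
 * free the buffer.
 */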
2238static void handle_error_info_rsp(union ibmvnic_crq *crq,
2239 struct ibmvnic_adapter *adapter)
2240{
2241 struct device *dev = &adapter->vdev->dev;
Wei Yongjun96183182016-06-27 20:48:53 +08002242 struct ibmvnic_error_buff *error_buff, *tmp;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002243 unsigned long flags;
2244 bool found = false;
2245 int i;
2246
	if (crq->request_error_rsp.rc.code) {
2248 dev_info(dev, "Request Error Rsp returned with rc=%x\n",
2249 crq->request_error_rsp.rc.code);
2250 return;
2251 }
2252
2253 spin_lock_irqsave(&adapter->error_list_lock, flags);
Wei Yongjun96183182016-06-27 20:48:53 +08002254 list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
Thomas Falcon032c5e82015-12-21 11:26:06 -06002255 if (error_buff->error_id == crq->request_error_rsp.error_id) {
2256 found = true;
2257 list_del(&error_buff->list);
2258 break;
2259 }
2260 spin_unlock_irqrestore(&adapter->error_list_lock, flags);
2261
2262 if (!found) {
2263 dev_err(dev, "Couldn't find error id %x\n",
Thomas Falcon75224c92017-02-15 10:33:33 -06002264 be32_to_cpu(crq->request_error_rsp.error_id));
Thomas Falcon032c5e82015-12-21 11:26:06 -06002265 return;
2266 }
2267
2268 dev_err(dev, "Detailed info for error id %x:",
Thomas Falcon75224c92017-02-15 10:33:33 -06002269 be32_to_cpu(crq->request_error_rsp.error_id));
Thomas Falcon032c5e82015-12-21 11:26:06 -06002270
2271 for (i = 0; i < error_buff->len; i++) {
2272 pr_cont("%02x", (int)error_buff->buff[i]);
2273 if (i % 8 == 7)
2274 pr_cont(" ");
2275 }
2276 pr_cont("\n");
2277
2278 dma_unmap_single(dev, error_buff->dma, error_buff->len,
2279 DMA_FROM_DEVICE);
2280 kfree(error_buff->buff);
2281 kfree(error_buff);
2282}
2283
2284static void handle_dump_size_rsp(union ibmvnic_crq *crq,
2285 struct ibmvnic_adapter *adapter)
2286{
2287 int len = be32_to_cpu(crq->request_dump_size_rsp.len);
2288 struct ibmvnic_inflight_cmd *inflight_cmd;
2289 struct device *dev = &adapter->vdev->dev;
2290 union ibmvnic_crq newcrq;
2291 unsigned long flags;
2292
2293 /* allocate and map buffer */
2294 adapter->dump_data = kmalloc(len, GFP_KERNEL);
2295 if (!adapter->dump_data) {
2296 complete(&adapter->fw_done);
2297 return;
2298 }
2299
2300 adapter->dump_data_token = dma_map_single(dev, adapter->dump_data, len,
2301 DMA_FROM_DEVICE);
2302
2303 if (dma_mapping_error(dev, adapter->dump_data_token)) {
2304 if (!firmware_has_feature(FW_FEATURE_CMO))
2305 dev_err(dev, "Couldn't map dump data\n");
2306 kfree(adapter->dump_data);
2307 complete(&adapter->fw_done);
2308 return;
2309 }
2310
2311 inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
2312 if (!inflight_cmd) {
2313 dma_unmap_single(dev, adapter->dump_data_token, len,
2314 DMA_FROM_DEVICE);
2315 kfree(adapter->dump_data);
2316 complete(&adapter->fw_done);
2317 return;
2318 }
2319
2320 memset(&newcrq, 0, sizeof(newcrq));
2321 newcrq.request_dump.first = IBMVNIC_CRQ_CMD;
2322 newcrq.request_dump.cmd = REQUEST_DUMP;
2323 newcrq.request_dump.ioba = cpu_to_be32(adapter->dump_data_token);
2324 newcrq.request_dump.len = cpu_to_be32(adapter->dump_data_size);
2325
2326 memcpy(&inflight_cmd->crq, &newcrq, sizeof(newcrq));
2327
2328 spin_lock_irqsave(&adapter->inflight_lock, flags);
2329 list_add_tail(&inflight_cmd->list, &adapter->inflight);
2330 spin_unlock_irqrestore(&adapter->inflight_lock, flags);
2331
2332 ibmvnic_send_crq(adapter, &newcrq);
2333}
2334
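/* Handle an asynchronous error indication from firmware: allocate and
 * DMA-map a buffer for the detailed error data and send a
 * REQUEST_ERROR_INFO CRQ asking the server to fill it in.
 */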
2335static void handle_error_indication(union ibmvnic_crq *crq,
2336 struct ibmvnic_adapter *adapter)
2337{
2338 int detail_len = be32_to_cpu(crq->error_indication.detail_error_sz);
2339 struct ibmvnic_inflight_cmd *inflight_cmd;
2340 struct device *dev = &adapter->vdev->dev;
2341 struct ibmvnic_error_buff *error_buff;
2342 union ibmvnic_crq new_crq;
2343 unsigned long flags;
2344
2345 dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
2346 crq->error_indication.
2347 flags & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
Thomas Falcon75224c92017-02-15 10:33:33 -06002348 be32_to_cpu(crq->error_indication.error_id),
2349 be16_to_cpu(crq->error_indication.error_cause));
Thomas Falcon032c5e82015-12-21 11:26:06 -06002350
2351 error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
2352 if (!error_buff)
2353 return;
2354
2355 error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
2356 if (!error_buff->buff) {
2357 kfree(error_buff);
2358 return;
2359 }
2360
2361 error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
2362 DMA_FROM_DEVICE);
2363 if (dma_mapping_error(dev, error_buff->dma)) {
2364 if (!firmware_has_feature(FW_FEATURE_CMO))
2365 dev_err(dev, "Couldn't map error buffer\n");
2366 kfree(error_buff->buff);
2367 kfree(error_buff);
2368 return;
2369 }
2370
2371 inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
2372 if (!inflight_cmd) {
2373 dma_unmap_single(dev, error_buff->dma, detail_len,
2374 DMA_FROM_DEVICE);
2375 kfree(error_buff->buff);
2376 kfree(error_buff);
2377 return;
2378 }
2379
2380 error_buff->len = detail_len;
2381 error_buff->error_id = crq->error_indication.error_id;
2382
2383 spin_lock_irqsave(&adapter->error_list_lock, flags);
2384 list_add_tail(&error_buff->list, &adapter->errors);
2385 spin_unlock_irqrestore(&adapter->error_list_lock, flags);
2386
2387 memset(&new_crq, 0, sizeof(new_crq));
2388 new_crq.request_error_info.first = IBMVNIC_CRQ_CMD;
2389 new_crq.request_error_info.cmd = REQUEST_ERROR_INFO;
2390 new_crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
2391 new_crq.request_error_info.len = cpu_to_be32(detail_len);
2392 new_crq.request_error_info.error_id = crq->error_indication.error_id;
2393
	memcpy(&inflight_cmd->crq, &new_crq, sizeof(new_crq));
2395
2396 spin_lock_irqsave(&adapter->inflight_lock, flags);
2397 list_add_tail(&inflight_cmd->list, &adapter->inflight);
2398 spin_unlock_irqrestore(&adapter->inflight_lock, flags);
2399
2400 ibmvnic_send_crq(adapter, &new_crq);
2401}
2402
2403static void handle_change_mac_rsp(union ibmvnic_crq *crq,
2404 struct ibmvnic_adapter *adapter)
2405{
2406 struct net_device *netdev = adapter->netdev;
2407 struct device *dev = &adapter->vdev->dev;
2408 long rc;
2409
2410 rc = crq->change_mac_addr_rsp.rc.code;
2411 if (rc) {
2412 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
2413 return;
2414 }
2415 memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
2416 ETH_ALEN);
2417}
2418
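/* Process a REQUEST_CAPABILITY response. On partial success the server's
 * counter-proposed value is adopted and the sub-CRQs are re-initialized;
 * once all outstanding capability requests complete, the IP offload query
 * is sent.
 */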
2419static void handle_request_cap_rsp(union ibmvnic_crq *crq,
2420 struct ibmvnic_adapter *adapter)
2421{
2422 struct device *dev = &adapter->vdev->dev;
2423 u64 *req_value;
2424 char *name;
2425
Thomas Falcon901e0402017-02-15 12:17:59 -06002426 atomic_dec(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002427 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
2428 case REQ_TX_QUEUES:
2429 req_value = &adapter->req_tx_queues;
2430 name = "tx";
2431 break;
2432 case REQ_RX_QUEUES:
2433 req_value = &adapter->req_rx_queues;
2434 name = "rx";
2435 break;
2436 case REQ_RX_ADD_QUEUES:
2437 req_value = &adapter->req_rx_add_queues;
2438 name = "rx_add";
2439 break;
2440 case REQ_TX_ENTRIES_PER_SUBCRQ:
2441 req_value = &adapter->req_tx_entries_per_subcrq;
2442 name = "tx_entries_per_subcrq";
2443 break;
2444 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
2445 req_value = &adapter->req_rx_add_entries_per_subcrq;
2446 name = "rx_add_entries_per_subcrq";
2447 break;
2448 case REQ_MTU:
2449 req_value = &adapter->req_mtu;
2450 name = "mtu";
2451 break;
2452 case PROMISC_REQUESTED:
2453 req_value = &adapter->promisc;
2454 name = "promisc";
2455 break;
2456 default:
2457 dev_err(dev, "Got invalid cap request rsp %d\n",
2458 crq->request_capability.capability);
2459 return;
2460 }
2461
2462 switch (crq->request_capability_rsp.rc.code) {
2463 case SUCCESS:
2464 break;
2465 case PARTIALSUCCESS:
2466 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
2467 *req_value,
Thomas Falcon28f4d162017-02-15 10:32:11 -06002468 (long int)be64_to_cpu(crq->request_capability_rsp.
Thomas Falcon032c5e82015-12-21 11:26:06 -06002469 number), name);
Thomas Falconea22d512016-07-06 15:35:17 -05002470 release_sub_crqs_no_irqs(adapter);
Thomas Falcon28f4d162017-02-15 10:32:11 -06002471 *req_value = be64_to_cpu(crq->request_capability_rsp.number);
Thomas Falconea22d512016-07-06 15:35:17 -05002472 init_sub_crqs(adapter, 1);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002473 return;
2474 default:
2475 dev_err(dev, "Error %d in request cap rsp\n",
2476 crq->request_capability_rsp.rc.code);
2477 return;
2478 }
2479
2480 /* Done receiving requested capabilities, query IP offload support */
Thomas Falcon901e0402017-02-15 12:17:59 -06002481 if (atomic_read(&adapter->running_cap_crqs) == 0) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06002482 union ibmvnic_crq newcrq;
2483 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
2484 struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
2485 &adapter->ip_offload_buf;
2486
Thomas Falcon249168a2017-02-15 12:18:00 -06002487 adapter->wait_capability = false;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002488 adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
2489 buf_sz,
2490 DMA_FROM_DEVICE);
2491
2492 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
2493 if (!firmware_has_feature(FW_FEATURE_CMO))
2494 dev_err(dev, "Couldn't map offload buffer\n");
2495 return;
2496 }
2497
2498 memset(&newcrq, 0, sizeof(newcrq));
2499 newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
2500 newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
2501 newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
2502 newcrq.query_ip_offload.ioba =
2503 cpu_to_be32(adapter->ip_offload_tok);
2504
2505 ibmvnic_send_crq(adapter, &newcrq);
2506 }
2507}
2508
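/* Handle the LOGIN response: unmap the login buffers, flag a renegotiation
 * with fewer queues if the server rejected the request, sanity-check the
 * returned queue counts, then request the RAS component count.
 */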
2509static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
2510 struct ibmvnic_adapter *adapter)
2511{
2512 struct device *dev = &adapter->vdev->dev;
2513 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
2514 struct ibmvnic_login_buffer *login = adapter->login_buf;
2515 union ibmvnic_crq crq;
2516 int i;
2517
2518 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
2519 DMA_BIDIRECTIONAL);
2520 dma_unmap_single(dev, adapter->login_rsp_buf_token,
2521 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);
2522
John Allen498cd8e2016-04-06 11:49:55 -05002523 /* If the number of queues requested can't be allocated by the
2524 * server, the login response will return with code 1. We will need
2525 * to resend the login buffer with fewer queues requested.
2526 */
2527 if (login_rsp_crq->generic.rc.code) {
2528 adapter->renegotiate = true;
2529 complete(&adapter->init_done);
2530 return 0;
2531 }
2532
Thomas Falcon032c5e82015-12-21 11:26:06 -06002533 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
2534 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
2535 netdev_dbg(adapter->netdev, "%016lx\n",
2536 ((unsigned long int *)(adapter->login_rsp_buf))[i]);
2537 }
2538
2539 /* Sanity checks */
2540 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
2541 (be32_to_cpu(login->num_rxcomp_subcrqs) *
2542 adapter->req_rx_add_queues !=
2543 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
2544 dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
2545 ibmvnic_remove(adapter->vdev);
2546 return -EIO;
2547 }
2548 complete(&adapter->init_done);
2549
2550 memset(&crq, 0, sizeof(crq));
2551 crq.request_ras_comp_num.first = IBMVNIC_CRQ_CMD;
2552 crq.request_ras_comp_num.cmd = REQUEST_RAS_COMP_NUM;
2553 ibmvnic_send_crq(adapter, &crq);
2554
2555 return 0;
2556}
2557
2558static void handle_request_map_rsp(union ibmvnic_crq *crq,
2559 struct ibmvnic_adapter *adapter)
2560{
2561 struct device *dev = &adapter->vdev->dev;
2562 u8 map_id = crq->request_map_rsp.map_id;
2563 int tx_subcrqs;
2564 int rx_subcrqs;
2565 long rc;
2566 int i;
2567
2568 tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
2569 rx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
2570
2571 rc = crq->request_map_rsp.rc.code;
2572 if (rc) {
2573 dev_err(dev, "Error %ld in REQUEST_MAP_RSP\n", rc);
2574 adapter->map_id--;
2575 /* need to find and zero tx/rx_pool map_id */
2576 for (i = 0; i < tx_subcrqs; i++) {
2577 if (adapter->tx_pool[i].long_term_buff.map_id == map_id)
2578 adapter->tx_pool[i].long_term_buff.map_id = 0;
2579 }
2580 for (i = 0; i < rx_subcrqs; i++) {
2581 if (adapter->rx_pool[i].long_term_buff.map_id == map_id)
2582 adapter->rx_pool[i].long_term_buff.map_id = 0;
2583 }
2584 }
2585 complete(&adapter->fw_done);
2586}
2587
2588static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
2589 struct ibmvnic_adapter *adapter)
2590{
2591 struct device *dev = &adapter->vdev->dev;
2592 long rc;
2593
2594 rc = crq->request_unmap_rsp.rc.code;
2595 if (rc)
2596 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
2597}
2598
2599static void handle_query_map_rsp(union ibmvnic_crq *crq,
2600 struct ibmvnic_adapter *adapter)
2601{
2602 struct net_device *netdev = adapter->netdev;
2603 struct device *dev = &adapter->vdev->dev;
2604 long rc;
2605
2606 rc = crq->query_map_rsp.rc.code;
2607 if (rc) {
2608 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
2609 return;
2610 }
2611 netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
2612 crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
2613 crq->query_map_rsp.free_pages);
2614}
2615
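/* Record one QUERY_CAPABILITY response in the adapter structure. When the
 * last outstanding query completes, sub-CRQ initialization is kicked off.
 */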
2616static void handle_query_cap_rsp(union ibmvnic_crq *crq,
2617 struct ibmvnic_adapter *adapter)
2618{
2619 struct net_device *netdev = adapter->netdev;
2620 struct device *dev = &adapter->vdev->dev;
2621 long rc;
2622
Thomas Falcon901e0402017-02-15 12:17:59 -06002623 atomic_dec(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002624 netdev_dbg(netdev, "Outstanding queries: %d\n",
Thomas Falcon901e0402017-02-15 12:17:59 -06002625 atomic_read(&adapter->running_cap_crqs));
Thomas Falcon032c5e82015-12-21 11:26:06 -06002626 rc = crq->query_capability.rc.code;
2627 if (rc) {
2628 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
2629 goto out;
2630 }
2631
2632 switch (be16_to_cpu(crq->query_capability.capability)) {
2633 case MIN_TX_QUEUES:
2634 adapter->min_tx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06002635 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002636 netdev_dbg(netdev, "min_tx_queues = %lld\n",
2637 adapter->min_tx_queues);
2638 break;
2639 case MIN_RX_QUEUES:
2640 adapter->min_rx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06002641 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002642 netdev_dbg(netdev, "min_rx_queues = %lld\n",
2643 adapter->min_rx_queues);
2644 break;
2645 case MIN_RX_ADD_QUEUES:
2646 adapter->min_rx_add_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06002647 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002648 netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
2649 adapter->min_rx_add_queues);
2650 break;
2651 case MAX_TX_QUEUES:
2652 adapter->max_tx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06002653 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002654 netdev_dbg(netdev, "max_tx_queues = %lld\n",
2655 adapter->max_tx_queues);
2656 break;
2657 case MAX_RX_QUEUES:
2658 adapter->max_rx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06002659 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002660 netdev_dbg(netdev, "max_rx_queues = %lld\n",
2661 adapter->max_rx_queues);
2662 break;
2663 case MAX_RX_ADD_QUEUES:
2664 adapter->max_rx_add_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06002665 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002666 netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
2667 adapter->max_rx_add_queues);
2668 break;
2669 case MIN_TX_ENTRIES_PER_SUBCRQ:
2670 adapter->min_tx_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06002671 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002672 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
2673 adapter->min_tx_entries_per_subcrq);
2674 break;
2675 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
2676 adapter->min_rx_add_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06002677 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002678 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
2679 adapter->min_rx_add_entries_per_subcrq);
2680 break;
2681 case MAX_TX_ENTRIES_PER_SUBCRQ:
2682 adapter->max_tx_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06002683 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002684 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
2685 adapter->max_tx_entries_per_subcrq);
2686 break;
2687 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
2688 adapter->max_rx_add_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06002689 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002690 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
2691 adapter->max_rx_add_entries_per_subcrq);
2692 break;
2693 case TCP_IP_OFFLOAD:
2694 adapter->tcp_ip_offload =
Thomas Falconde89e852016-03-01 10:20:09 -06002695 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002696 netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
2697 adapter->tcp_ip_offload);
2698 break;
2699 case PROMISC_SUPPORTED:
2700 adapter->promisc_supported =
Thomas Falconde89e852016-03-01 10:20:09 -06002701 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002702 netdev_dbg(netdev, "promisc_supported = %lld\n",
2703 adapter->promisc_supported);
2704 break;
2705 case MIN_MTU:
Thomas Falconde89e852016-03-01 10:20:09 -06002706 adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
Thomas Falconf39f0d12017-02-14 10:22:59 -06002707 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002708 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
2709 break;
2710 case MAX_MTU:
Thomas Falconde89e852016-03-01 10:20:09 -06002711 adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
Thomas Falconf39f0d12017-02-14 10:22:59 -06002712 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002713 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
2714 break;
2715 case MAX_MULTICAST_FILTERS:
2716 adapter->max_multicast_filters =
Thomas Falconde89e852016-03-01 10:20:09 -06002717 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002718 netdev_dbg(netdev, "max_multicast_filters = %lld\n",
2719 adapter->max_multicast_filters);
2720 break;
2721 case VLAN_HEADER_INSERTION:
2722 adapter->vlan_header_insertion =
Thomas Falconde89e852016-03-01 10:20:09 -06002723 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002724 if (adapter->vlan_header_insertion)
2725 netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
2726 netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
2727 adapter->vlan_header_insertion);
2728 break;
2729 case MAX_TX_SG_ENTRIES:
2730 adapter->max_tx_sg_entries =
Thomas Falconde89e852016-03-01 10:20:09 -06002731 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002732 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
2733 adapter->max_tx_sg_entries);
2734 break;
2735 case RX_SG_SUPPORTED:
2736 adapter->rx_sg_supported =
Thomas Falconde89e852016-03-01 10:20:09 -06002737 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002738 netdev_dbg(netdev, "rx_sg_supported = %lld\n",
2739 adapter->rx_sg_supported);
2740 break;
2741 case OPT_TX_COMP_SUB_QUEUES:
2742 adapter->opt_tx_comp_sub_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06002743 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002744 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
2745 adapter->opt_tx_comp_sub_queues);
2746 break;
2747 case OPT_RX_COMP_QUEUES:
2748 adapter->opt_rx_comp_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06002749 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002750 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
2751 adapter->opt_rx_comp_queues);
2752 break;
2753 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
2754 adapter->opt_rx_bufadd_q_per_rx_comp_q =
Thomas Falconde89e852016-03-01 10:20:09 -06002755 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002756 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
2757 adapter->opt_rx_bufadd_q_per_rx_comp_q);
2758 break;
2759 case OPT_TX_ENTRIES_PER_SUBCRQ:
2760 adapter->opt_tx_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06002761 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002762 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
2763 adapter->opt_tx_entries_per_subcrq);
2764 break;
2765 case OPT_RXBA_ENTRIES_PER_SUBCRQ:
2766 adapter->opt_rxba_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06002767 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002768 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
2769 adapter->opt_rxba_entries_per_subcrq);
2770 break;
2771 case TX_RX_DESC_REQ:
2772 adapter->tx_rx_desc_req = crq->query_capability.number;
2773 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
2774 adapter->tx_rx_desc_req);
2775 break;
2776
2777 default:
2778 netdev_err(netdev, "Got invalid cap rsp %d\n",
2779 crq->query_capability.capability);
2780 }
2781
2782out:
Thomas Falcon249168a2017-02-15 12:18:00 -06002783 if (atomic_read(&adapter->running_cap_crqs) == 0) {
2784 adapter->wait_capability = false;
		/* We're done querying the capabilities, initialize sub-crqs */
		init_sub_crqs(adapter, 0);
Thomas Falcon249168a2017-02-15 12:18:00 -06002787 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06002788}
2789
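/* Apply a CONTROL_RAS response to the cached state of the matching RAS
 * component (trace level, error level, pause/resume, trace on/off,
 * trace buffer size).
 */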
2790static void handle_control_ras_rsp(union ibmvnic_crq *crq,
2791 struct ibmvnic_adapter *adapter)
2792{
2793 u8 correlator = crq->control_ras_rsp.correlator;
2794 struct device *dev = &adapter->vdev->dev;
2795 bool found = false;
2796 int i;
2797
2798 if (crq->control_ras_rsp.rc.code) {
2799 dev_warn(dev, "Control ras failed rc=%d\n",
2800 crq->control_ras_rsp.rc.code);
2801 return;
2802 }
2803
2804 for (i = 0; i < adapter->ras_comp_num; i++) {
2805 if (adapter->ras_comps[i].correlator == correlator) {
2806 found = true;
2807 break;
2808 }
2809 }
2810
2811 if (!found) {
2812 dev_warn(dev, "Correlator not found on control_ras_rsp\n");
2813 return;
2814 }
2815
2816 switch (crq->control_ras_rsp.op) {
2817 case IBMVNIC_TRACE_LEVEL:
2818 adapter->ras_comps[i].trace_level = crq->control_ras.level;
2819 break;
2820 case IBMVNIC_ERROR_LEVEL:
2821 adapter->ras_comps[i].error_check_level =
2822 crq->control_ras.level;
2823 break;
2824 case IBMVNIC_TRACE_PAUSE:
2825 adapter->ras_comp_int[i].paused = 1;
2826 break;
2827 case IBMVNIC_TRACE_RESUME:
2828 adapter->ras_comp_int[i].paused = 0;
2829 break;
2830 case IBMVNIC_TRACE_ON:
2831 adapter->ras_comps[i].trace_on = 1;
2832 break;
2833 case IBMVNIC_TRACE_OFF:
2834 adapter->ras_comps[i].trace_on = 0;
2835 break;
2836 case IBMVNIC_CHG_TRACE_BUFF_SZ:
2837 /* trace_buff_sz is 3 bytes, stuff it into an int */
2838 ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[0] = 0;
2839 ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[1] =
2840 crq->control_ras_rsp.trace_buff_sz[0];
2841 ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[2] =
2842 crq->control_ras_rsp.trace_buff_sz[1];
2843 ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[3] =
2844 crq->control_ras_rsp.trace_buff_sz[2];
2845 break;
2846 default:
2847 dev_err(dev, "invalid op %d on control_ras_rsp",
2848 crq->control_ras_rsp.op);
2849 }
2850}
2851
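/* debugfs read handler: ask firmware for a trace dump via COLLECT_FW_TRACE
 * into a temporary DMA buffer and copy the requested slice to user space
 */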
Thomas Falcon032c5e82015-12-21 11:26:06 -06002852static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len,
2853 loff_t *ppos)
2854{
2855 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2856 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2857 struct device *dev = &adapter->vdev->dev;
2858 struct ibmvnic_fw_trace_entry *trace;
2859 int num = ras_comp_int->num;
2860 union ibmvnic_crq crq;
2861 dma_addr_t trace_tok;
2862
2863 if (*ppos >= be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
2864 return 0;
2865
2866 trace =
2867 dma_alloc_coherent(dev,
2868 be32_to_cpu(adapter->ras_comps[num].
2869 trace_buff_size), &trace_tok,
2870 GFP_KERNEL);
2871 if (!trace) {
2872 dev_err(dev, "Couldn't alloc trace buffer\n");
2873 return 0;
2874 }
2875
2876 memset(&crq, 0, sizeof(crq));
2877 crq.collect_fw_trace.first = IBMVNIC_CRQ_CMD;
2878 crq.collect_fw_trace.cmd = COLLECT_FW_TRACE;
2879 crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator;
2880 crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok);
2881 crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002882
2883 init_completion(&adapter->fw_done);
Nathan Fontenotdb5d0b52017-02-10 13:45:05 -05002884 ibmvnic_send_crq(adapter, &crq);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002885 wait_for_completion(&adapter->fw_done);
2886
2887 if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
2888 len =
2889 be32_to_cpu(adapter->ras_comps[num].trace_buff_size) -
2890 *ppos;
2891
2892 copy_to_user(user_buf, &((u8 *)trace)[*ppos], len);
2893
2894 dma_free_coherent(dev,
2895 be32_to_cpu(adapter->ras_comps[num].trace_buff_size),
2896 trace, trace_tok);
2897 *ppos += len;
2898 return len;
2899}
2900
2901static const struct file_operations trace_ops = {
2902 .owner = THIS_MODULE,
Wei Yongjun7a95e942016-08-24 13:50:03 +00002903 .open = simple_open,
Thomas Falcon032c5e82015-12-21 11:26:06 -06002904 .read = trace_read,
2905};
2906
2907static ssize_t paused_read(struct file *file, char __user *user_buf, size_t len,
2908 loff_t *ppos)
2909{
2910 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2911 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2912 int num = ras_comp_int->num;
2913 char buff[5]; /* 1 or 0 plus \n and \0 */
2914 int size;
2915
2916 size = sprintf(buff, "%d\n", adapter->ras_comp_int[num].paused);
2917
2918 if (*ppos >= size)
2919 return 0;
2920
2921 copy_to_user(user_buf, buff, size);
2922 *ppos += size;
2923 return size;
2924}
2925
2926static ssize_t paused_write(struct file *file, const char __user *user_buf,
2927 size_t len, loff_t *ppos)
2928{
2929 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2930 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2931 int num = ras_comp_int->num;
2932 union ibmvnic_crq crq;
2933 unsigned long val;
2934 char buff[9]; /* decimal max int plus \n and \0 */
2935
2936 copy_from_user(buff, user_buf, sizeof(buff));
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;
2938
2939 adapter->ras_comp_int[num].paused = val ? 1 : 0;
2940
2941 memset(&crq, 0, sizeof(crq));
2942 crq.control_ras.first = IBMVNIC_CRQ_CMD;
2943 crq.control_ras.cmd = CONTROL_RAS;
2944 crq.control_ras.correlator = adapter->ras_comps[num].correlator;
2945 crq.control_ras.op = val ? IBMVNIC_TRACE_PAUSE : IBMVNIC_TRACE_RESUME;
2946 ibmvnic_send_crq(adapter, &crq);
2947
2948 return len;
2949}
2950
2951static const struct file_operations paused_ops = {
2952 .owner = THIS_MODULE,
Wei Yongjun7a95e942016-08-24 13:50:03 +00002953 .open = simple_open,
Thomas Falcon032c5e82015-12-21 11:26:06 -06002954 .read = paused_read,
2955 .write = paused_write,
2956};
2957
2958static ssize_t tracing_read(struct file *file, char __user *user_buf,
2959 size_t len, loff_t *ppos)
2960{
2961 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2962 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2963 int num = ras_comp_int->num;
2964 char buff[5]; /* 1 or 0 plus \n and \0 */
2965 int size;
2966
2967 size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_on);
2968
2969 if (*ppos >= size)
2970 return 0;
2971
2972 copy_to_user(user_buf, buff, size);
2973 *ppos += size;
2974 return size;
2975}
2976
2977static ssize_t tracing_write(struct file *file, const char __user *user_buf,
2978 size_t len, loff_t *ppos)
2979{
2980 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2981 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2982 int num = ras_comp_int->num;
2983 union ibmvnic_crq crq;
2984 unsigned long val;
2985 char buff[9]; /* decimal max int plus \n and \0 */
2986
2987 copy_from_user(buff, user_buf, sizeof(buff));
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;
2989
2990 memset(&crq, 0, sizeof(crq));
2991 crq.control_ras.first = IBMVNIC_CRQ_CMD;
2992 crq.control_ras.cmd = CONTROL_RAS;
2993 crq.control_ras.correlator = adapter->ras_comps[num].correlator;
2994 crq.control_ras.op = val ? IBMVNIC_TRACE_ON : IBMVNIC_TRACE_OFF;
	ibmvnic_send_crq(adapter, &crq);

	return len;
2997}
2998
2999static const struct file_operations tracing_ops = {
3000 .owner = THIS_MODULE,
Wei Yongjun7a95e942016-08-24 13:50:03 +00003001 .open = simple_open,
Thomas Falcon032c5e82015-12-21 11:26:06 -06003002 .read = tracing_read,
3003 .write = tracing_write,
3004};
3005
3006static ssize_t error_level_read(struct file *file, char __user *user_buf,
3007 size_t len, loff_t *ppos)
3008{
3009 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3010 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
3011 int num = ras_comp_int->num;
3012 char buff[5]; /* decimal max char plus \n and \0 */
3013 int size;
3014
3015 size = sprintf(buff, "%d\n", adapter->ras_comps[num].error_check_level);
3016
3017 if (*ppos >= size)
3018 return 0;
3019
3020 copy_to_user(user_buf, buff, size);
3021 *ppos += size;
3022 return size;
3023}
3024
3025static ssize_t error_level_write(struct file *file, const char __user *user_buf,
3026 size_t len, loff_t *ppos)
3027{
3028 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3029 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
3030 int num = ras_comp_int->num;
3031 union ibmvnic_crq crq;
3032 unsigned long val;
3033 char buff[9]; /* decimal max int plus \n and \0 */
3034
3035 copy_from_user(buff, user_buf, sizeof(buff));
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;
3037
3038 if (val > 9)
3039 val = 9;
3040
3041 memset(&crq, 0, sizeof(crq));
3042 crq.control_ras.first = IBMVNIC_CRQ_CMD;
3043 crq.control_ras.cmd = CONTROL_RAS;
3044 crq.control_ras.correlator = adapter->ras_comps[num].correlator;
3045 crq.control_ras.op = IBMVNIC_ERROR_LEVEL;
3046 crq.control_ras.level = val;
3047 ibmvnic_send_crq(adapter, &crq);
3048
3049 return len;
3050}
3051
3052static const struct file_operations error_level_ops = {
3053 .owner = THIS_MODULE,
Wei Yongjun7a95e942016-08-24 13:50:03 +00003054 .open = simple_open,
Thomas Falcon032c5e82015-12-21 11:26:06 -06003055 .read = error_level_read,
3056 .write = error_level_write,
3057};
3058
3059static ssize_t trace_level_read(struct file *file, char __user *user_buf,
3060 size_t len, loff_t *ppos)
3061{
3062 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3063 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
3064 int num = ras_comp_int->num;
3065 char buff[5]; /* decimal max char plus \n and \0 */
3066 int size;
3067
3068 size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_level);
3069 if (*ppos >= size)
3070 return 0;
3071
3072 copy_to_user(user_buf, buff, size);
3073 *ppos += size;
3074 return size;
3075}
3076
3077static ssize_t trace_level_write(struct file *file, const char __user *user_buf,
3078 size_t len, loff_t *ppos)
3079{
3080 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3081 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
3082 union ibmvnic_crq crq;
3083 unsigned long val;
	char buff[9]; /* decimal value plus '\n' and '\0' */
	size_t count = min_t(size_t, len, sizeof(buff) - 1);

	if (copy_from_user(buff, user_buf, count))
		return -EFAULT;
	buff[count] = '\0';
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;
3088 if (val > 9)
3089 val = 9;
3090
3091 memset(&crq, 0, sizeof(crq));
3092 crq.control_ras.first = IBMVNIC_CRQ_CMD;
3093 crq.control_ras.cmd = CONTROL_RAS;
3094 crq.control_ras.correlator =
3095 adapter->ras_comps[ras_comp_int->num].correlator;
3096 crq.control_ras.op = IBMVNIC_TRACE_LEVEL;
3097 crq.control_ras.level = val;
3098 ibmvnic_send_crq(adapter, &crq);
3099
3100 return len;
3101}
3102
3103static const struct file_operations trace_level_ops = {
3104 .owner = THIS_MODULE,
Wei Yongjun7a95e942016-08-24 13:50:03 +00003105 .open = simple_open,
Thomas Falcon032c5e82015-12-21 11:26:06 -06003106 .read = trace_level_read,
3107 .write = trace_level_write,
3108};
3109
3110static ssize_t trace_buff_size_read(struct file *file, char __user *user_buf,
3111 size_t len, loff_t *ppos)
3112{
3113 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3114 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
3115 int num = ras_comp_int->num;
3116 char buff[9]; /* decimal max int plus \n and \0 */
3117 int size;
3118
3119 size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_buff_size);
3120 if (*ppos >= size)
3121 return 0;
3122
	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;
3124 *ppos += size;
3125 return size;
3126}
3127
3128static ssize_t trace_buff_size_write(struct file *file,
3129 const char __user *user_buf, size_t len,
3130 loff_t *ppos)
3131{
3132 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3133 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
3134 union ibmvnic_crq crq;
3135 unsigned long val;
	char buff[9]; /* decimal value plus '\n' and '\0' */
	size_t count = min_t(size_t, len, sizeof(buff) - 1);

	if (copy_from_user(buff, user_buf, count))
		return -EFAULT;
	buff[count] = '\0';
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;
3140
3141 memset(&crq, 0, sizeof(crq));
3142 crq.control_ras.first = IBMVNIC_CRQ_CMD;
3143 crq.control_ras.cmd = CONTROL_RAS;
3144 crq.control_ras.correlator =
3145 adapter->ras_comps[ras_comp_int->num].correlator;
3146 crq.control_ras.op = IBMVNIC_CHG_TRACE_BUFF_SZ;
	/* trace_buff_sz is a 3-byte field; send the low 24 bits of val,
	 * most-significant byte first, regardless of host endianness
	 */
	crq.control_ras.trace_buff_sz[0] = (val >> 16) & 0xff;
	crq.control_ras.trace_buff_sz[1] = (val >> 8) & 0xff;
	crq.control_ras.trace_buff_sz[2] = val & 0xff;
3151 ibmvnic_send_crq(adapter, &crq);
3152
3153 return len;
3154}
3155
3156static const struct file_operations trace_size_ops = {
3157 .owner = THIS_MODULE,
Wei Yongjun7a95e942016-08-24 13:50:03 +00003158 .open = simple_open,
Thomas Falcon032c5e82015-12-21 11:26:06 -06003159 .read = trace_buff_size_read,
3160 .write = trace_buff_size_write,
3161};
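/*
 * The trace buffer size travels as a 24-bit quantity, so only values up to
 * 0xffffff survive the IBMVNIC_CHG_TRACE_BUFF_SZ request.  Illustrative use
 * (paths hedged as above):
 *
 *   # echo 8192 > /sys/kernel/debug/ibmvnic_30000002/ras_comps/<component>/trace_buf_size
 */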
3162
3163static void handle_request_ras_comps_rsp(union ibmvnic_crq *crq,
3164 struct ibmvnic_adapter *adapter)
3165{
3166 struct device *dev = &adapter->vdev->dev;
3167 struct dentry *dir_ent;
3168 struct dentry *ent;
3169 int i;
3170
3171 debugfs_remove_recursive(adapter->ras_comps_ent);
3172
3173 adapter->ras_comps_ent = debugfs_create_dir("ras_comps",
3174 adapter->debugfs_dir);
3175 if (!adapter->ras_comps_ent || IS_ERR(adapter->ras_comps_ent)) {
3176 dev_info(dev, "debugfs create ras_comps dir failed\n");
3177 return;
3178 }
3179
3180 for (i = 0; i < adapter->ras_comp_num; i++) {
3181 dir_ent = debugfs_create_dir(adapter->ras_comps[i].name,
3182 adapter->ras_comps_ent);
3183 if (!dir_ent || IS_ERR(dir_ent)) {
3184 dev_info(dev, "debugfs create %s dir failed\n",
3185 adapter->ras_comps[i].name);
3186 continue;
3187 }
3188
3189 adapter->ras_comp_int[i].adapter = adapter;
3190 adapter->ras_comp_int[i].num = i;
3191 adapter->ras_comp_int[i].desc_blob.data =
3192 &adapter->ras_comps[i].description;
3193 adapter->ras_comp_int[i].desc_blob.size =
3194 sizeof(adapter->ras_comps[i].description);
3195
		/* Don't need to remember the dentries because the debugfs dir
3197 * gets removed recursively
3198 */
3199 ent = debugfs_create_blob("description", S_IRUGO, dir_ent,
3200 &adapter->ras_comp_int[i].desc_blob);
3201 ent = debugfs_create_file("trace_buf_size", S_IRUGO | S_IWUSR,
3202 dir_ent, &adapter->ras_comp_int[i],
3203 &trace_size_ops);
3204 ent = debugfs_create_file("trace_level",
3205 S_IRUGO |
3206 (adapter->ras_comps[i].trace_level !=
3207 0xFF ? S_IWUSR : 0),
3208 dir_ent, &adapter->ras_comp_int[i],
3209 &trace_level_ops);
3210 ent = debugfs_create_file("error_level",
3211 S_IRUGO |
3212 (adapter->
3213 ras_comps[i].error_check_level !=
3214 0xFF ? S_IWUSR : 0),
3215 dir_ent, &adapter->ras_comp_int[i],
					  &error_level_ops);
3217 ent = debugfs_create_file("tracing", S_IRUGO | S_IWUSR,
3218 dir_ent, &adapter->ras_comp_int[i],
3219 &tracing_ops);
3220 ent = debugfs_create_file("paused", S_IRUGO | S_IWUSR,
3221 dir_ent, &adapter->ras_comp_int[i],
3222 &paused_ops);
3223 ent = debugfs_create_file("trace", S_IRUGO, dir_ent,
3224 &adapter->ras_comp_int[i],
3225 &trace_ops);
3226 }
3227}
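/*
 * The directory tree built above ends up looking roughly like this, with one
 * subdirectory per firmware component reported by the server (the component
 * names are whatever the server supplies; this layout is only an example):
 *
 *   /sys/kernel/debug/ibmvnic_<unit address>/
 *     ras_comps/
 *       <component>/
 *         description   trace_buf_size   trace_level
 *         error_level   tracing          paused   trace
 */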
3228
3229static void handle_request_ras_comp_num_rsp(union ibmvnic_crq *crq,
3230 struct ibmvnic_adapter *adapter)
3231{
3232 int len = adapter->ras_comp_num * sizeof(struct ibmvnic_fw_component);
3233 struct device *dev = &adapter->vdev->dev;
3234 union ibmvnic_crq newcrq;
3235
3236 adapter->ras_comps = dma_alloc_coherent(dev, len,
3237 &adapter->ras_comps_tok,
3238 GFP_KERNEL);
3239 if (!adapter->ras_comps) {
3240 if (!firmware_has_feature(FW_FEATURE_CMO))
3241 dev_err(dev, "Couldn't alloc fw comps buffer\n");
3242 return;
3243 }
3244
3245 adapter->ras_comp_int = kmalloc(adapter->ras_comp_num *
3246 sizeof(struct ibmvnic_fw_comp_internal),
3247 GFP_KERNEL);
	if (!adapter->ras_comp_int) {
		dma_free_coherent(dev, len, adapter->ras_comps,
				  adapter->ras_comps_tok);
		return;
	}
3251
3252 memset(&newcrq, 0, sizeof(newcrq));
3253 newcrq.request_ras_comps.first = IBMVNIC_CRQ_CMD;
3254 newcrq.request_ras_comps.cmd = REQUEST_RAS_COMPS;
3255 newcrq.request_ras_comps.ioba = cpu_to_be32(adapter->ras_comps_tok);
3256 newcrq.request_ras_comps.len = cpu_to_be32(len);
3257 ibmvnic_send_crq(adapter, &newcrq);
3258}
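/*
 * RAS component discovery is a two-step CRQ handshake: the driver first asks
 * for the number of components, and the REQUEST_RAS_COMP_NUM_RSP case in
 * ibmvnic_handle_crq() records that count before calling the function above.
 * The function then hands the server a DMA buffer (ioba/len) large enough for
 * that many struct ibmvnic_fw_component entries; when REQUEST_RAS_COMPS_RSP
 * arrives with the buffer filled in, handle_request_ras_comps_rsp() exposes
 * each component through debugfs.
 */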
3259
3260static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
3261{
Wei Yongjun96183182016-06-27 20:48:53 +08003262 struct ibmvnic_inflight_cmd *inflight_cmd, *tmp1;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003263 struct device *dev = &adapter->vdev->dev;
Wei Yongjun96183182016-06-27 20:48:53 +08003264 struct ibmvnic_error_buff *error_buff, *tmp2;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003265 unsigned long flags;
3266 unsigned long flags2;
3267
3268 spin_lock_irqsave(&adapter->inflight_lock, flags);
Wei Yongjun96183182016-06-27 20:48:53 +08003269 list_for_each_entry_safe(inflight_cmd, tmp1, &adapter->inflight, list) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06003270 switch (inflight_cmd->crq.generic.cmd) {
3271 case LOGIN:
3272 dma_unmap_single(dev, adapter->login_buf_token,
3273 adapter->login_buf_sz,
3274 DMA_BIDIRECTIONAL);
3275 dma_unmap_single(dev, adapter->login_rsp_buf_token,
3276 adapter->login_rsp_buf_sz,
3277 DMA_BIDIRECTIONAL);
3278 kfree(adapter->login_rsp_buf);
3279 kfree(adapter->login_buf);
3280 break;
3281 case REQUEST_DUMP:
3282 complete(&adapter->fw_done);
3283 break;
3284 case REQUEST_ERROR_INFO:
3285 spin_lock_irqsave(&adapter->error_list_lock, flags2);
Wei Yongjun96183182016-06-27 20:48:53 +08003286 list_for_each_entry_safe(error_buff, tmp2,
3287 &adapter->errors, list) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06003288 dma_unmap_single(dev, error_buff->dma,
3289 error_buff->len,
3290 DMA_FROM_DEVICE);
3291 kfree(error_buff->buff);
3292 list_del(&error_buff->list);
3293 kfree(error_buff);
3294 }
3295 spin_unlock_irqrestore(&adapter->error_list_lock,
3296 flags2);
3297 break;
3298 }
3299 list_del(&inflight_cmd->list);
3300 kfree(inflight_cmd);
3301 }
3302 spin_unlock_irqrestore(&adapter->inflight_lock, flags);
3303}
3304
Thomas Falcon9888d7b2016-10-27 12:28:51 -05003305static void ibmvnic_xport_event(struct work_struct *work)
3306{
3307 struct ibmvnic_adapter *adapter = container_of(work,
3308 struct ibmvnic_adapter,
3309 ibmvnic_xport);
3310 struct device *dev = &adapter->vdev->dev;
3311 long rc;
3312
3313 ibmvnic_free_inflight(adapter);
3314 release_sub_crqs(adapter);
3315 if (adapter->migrated) {
3316 rc = ibmvnic_reenable_crq_queue(adapter);
3317 if (rc)
3318 dev_err(dev, "Error after enable rc=%ld\n", rc);
3319 adapter->migrated = false;
3320 rc = ibmvnic_send_crq_init(adapter);
3321 if (rc)
3322 dev_err(dev, "Error sending init rc=%ld\n", rc);
3323 }
3324}
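/*
 * Transport-event recovery in a nutshell: the CRQ handler below schedules
 * this worker when the partition migrates or the virtual adapter loses its
 * connection.  The worker discards in-flight commands, tears down the
 * sub-CRQs and, after a migration, re-enables the main CRQ with H_ENABLE_CRQ
 * and restarts the initialization handshake by sending a fresh init CRQ.
 */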
3325
Thomas Falcon032c5e82015-12-21 11:26:06 -06003326static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
3327 struct ibmvnic_adapter *adapter)
3328{
3329 struct ibmvnic_generic_crq *gen_crq = &crq->generic;
3330 struct net_device *netdev = adapter->netdev;
3331 struct device *dev = &adapter->vdev->dev;
3332 long rc;
3333
3334 netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
3335 ((unsigned long int *)crq)[0],
3336 ((unsigned long int *)crq)[1]);
3337 switch (gen_crq->first) {
3338 case IBMVNIC_CRQ_INIT_RSP:
3339 switch (gen_crq->cmd) {
3340 case IBMVNIC_CRQ_INIT:
3341 dev_info(dev, "Partner initialized\n");
3342 /* Send back a response */
3343 rc = ibmvnic_send_crq_init_complete(adapter);
Thomas Falcon65dc6892016-07-06 15:35:18 -05003344 if (!rc)
3345 schedule_work(&adapter->vnic_crq_init);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003346 else
3347 dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
3348 break;
3349 case IBMVNIC_CRQ_INIT_COMPLETE:
3350 dev_info(dev, "Partner initialization complete\n");
3351 send_version_xchg(adapter);
3352 break;
3353 default:
3354 dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
3355 }
3356 return;
3357 case IBMVNIC_CRQ_XPORT_EVENT:
3358 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
3359 dev_info(dev, "Re-enabling adapter\n");
3360 adapter->migrated = true;
Thomas Falcon9888d7b2016-10-27 12:28:51 -05003361 schedule_work(&adapter->ibmvnic_xport);
Thomas Falcondfad09a2016-08-18 11:37:51 -05003362 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
3363 dev_info(dev, "Backing device failover detected\n");
3364 netif_carrier_off(netdev);
3365 adapter->failover = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003366 } else {
3367 /* The adapter lost the connection */
3368 dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
3369 gen_crq->cmd);
Thomas Falcon9888d7b2016-10-27 12:28:51 -05003370 schedule_work(&adapter->ibmvnic_xport);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003371 }
3372 return;
3373 case IBMVNIC_CRQ_CMD_RSP:
3374 break;
3375 default:
3376 dev_err(dev, "Got an invalid msg type 0x%02x\n",
3377 gen_crq->first);
3378 return;
3379 }
3380
3381 switch (gen_crq->cmd) {
3382 case VERSION_EXCHANGE_RSP:
3383 rc = crq->version_exchange_rsp.rc.code;
3384 if (rc) {
3385 dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
3386 break;
3387 }
3388 dev_info(dev, "Partner protocol version is %d\n",
3389 crq->version_exchange_rsp.version);
3390 if (be16_to_cpu(crq->version_exchange_rsp.version) <
3391 ibmvnic_version)
3392 ibmvnic_version =
3393 be16_to_cpu(crq->version_exchange_rsp.version);
3394 send_cap_queries(adapter);
3395 break;
3396 case QUERY_CAPABILITY_RSP:
3397 handle_query_cap_rsp(crq, adapter);
3398 break;
3399 case QUERY_MAP_RSP:
3400 handle_query_map_rsp(crq, adapter);
3401 break;
3402 case REQUEST_MAP_RSP:
3403 handle_request_map_rsp(crq, adapter);
3404 break;
3405 case REQUEST_UNMAP_RSP:
3406 handle_request_unmap_rsp(crq, adapter);
3407 break;
3408 case REQUEST_CAPABILITY_RSP:
3409 handle_request_cap_rsp(crq, adapter);
3410 break;
3411 case LOGIN_RSP:
3412 netdev_dbg(netdev, "Got Login Response\n");
3413 handle_login_rsp(crq, adapter);
3414 break;
3415 case LOGICAL_LINK_STATE_RSP:
3416 netdev_dbg(netdev, "Got Logical Link State Response\n");
3417 adapter->logical_link_state =
3418 crq->logical_link_state_rsp.link_state;
3419 break;
3420 case LINK_STATE_INDICATION:
3421 netdev_dbg(netdev, "Got Logical Link State Indication\n");
3422 adapter->phys_link_state =
3423 crq->link_state_indication.phys_link_state;
3424 adapter->logical_link_state =
3425 crq->link_state_indication.logical_link_state;
3426 break;
3427 case CHANGE_MAC_ADDR_RSP:
3428 netdev_dbg(netdev, "Got MAC address change Response\n");
3429 handle_change_mac_rsp(crq, adapter);
3430 break;
3431 case ERROR_INDICATION:
3432 netdev_dbg(netdev, "Got Error Indication\n");
3433 handle_error_indication(crq, adapter);
3434 break;
3435 case REQUEST_ERROR_RSP:
3436 netdev_dbg(netdev, "Got Error Detail Response\n");
3437 handle_error_info_rsp(crq, adapter);
3438 break;
3439 case REQUEST_STATISTICS_RSP:
3440 netdev_dbg(netdev, "Got Statistics Response\n");
3441 complete(&adapter->stats_done);
3442 break;
3443 case REQUEST_DUMP_SIZE_RSP:
3444 netdev_dbg(netdev, "Got Request Dump Size Response\n");
3445 handle_dump_size_rsp(crq, adapter);
3446 break;
3447 case REQUEST_DUMP_RSP:
3448 netdev_dbg(netdev, "Got Request Dump Response\n");
3449 complete(&adapter->fw_done);
3450 break;
3451 case QUERY_IP_OFFLOAD_RSP:
3452 netdev_dbg(netdev, "Got Query IP offload Response\n");
3453 handle_query_ip_offload_rsp(adapter);
3454 break;
3455 case MULTICAST_CTRL_RSP:
3456 netdev_dbg(netdev, "Got multicast control Response\n");
3457 break;
3458 case CONTROL_IP_OFFLOAD_RSP:
3459 netdev_dbg(netdev, "Got Control IP offload Response\n");
3460 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
3461 sizeof(adapter->ip_offload_ctrl),
3462 DMA_TO_DEVICE);
John Allenbd0b6722017-03-17 17:13:40 -05003463 complete(&adapter->init_done);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003464 break;
3465 case REQUEST_RAS_COMP_NUM_RSP:
3466 netdev_dbg(netdev, "Got Request RAS Comp Num Response\n");
3467 if (crq->request_ras_comp_num_rsp.rc.code == 10) {
3468 netdev_dbg(netdev, "Request RAS Comp Num not supported\n");
3469 break;
3470 }
3471 adapter->ras_comp_num =
3472 be32_to_cpu(crq->request_ras_comp_num_rsp.num_components);
3473 handle_request_ras_comp_num_rsp(crq, adapter);
3474 break;
3475 case REQUEST_RAS_COMPS_RSP:
3476 netdev_dbg(netdev, "Got Request RAS Comps Response\n");
3477 handle_request_ras_comps_rsp(crq, adapter);
3478 break;
3479 case CONTROL_RAS_RSP:
3480 netdev_dbg(netdev, "Got Control RAS Response\n");
3481 handle_control_ras_rsp(crq, adapter);
3482 break;
3483 case COLLECT_FW_TRACE_RSP:
3484 netdev_dbg(netdev, "Got Collect firmware trace Response\n");
3485 complete(&adapter->fw_done);
3486 break;
3487 default:
3488 netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
3489 gen_crq->cmd);
3490 }
3491}
3492
3493static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
3494{
3495 struct ibmvnic_adapter *adapter = instance;
Thomas Falcon6c267b32017-02-15 12:17:58 -06003496 unsigned long flags;
3497
3498 spin_lock_irqsave(&adapter->crq.lock, flags);
3499 vio_disable_interrupts(adapter->vdev);
3500 tasklet_schedule(&adapter->tasklet);
3501 spin_unlock_irqrestore(&adapter->crq.lock, flags);
3502 return IRQ_HANDLED;
3503}
3504
3505static void ibmvnic_tasklet(void *data)
3506{
3507 struct ibmvnic_adapter *adapter = data;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003508 struct ibmvnic_crq_queue *queue = &adapter->crq;
3509 struct vio_dev *vdev = adapter->vdev;
3510 union ibmvnic_crq *crq;
3511 unsigned long flags;
3512 bool done = false;
3513
3514 spin_lock_irqsave(&queue->lock, flags);
3515 vio_disable_interrupts(vdev);
3516 while (!done) {
3517 /* Pull all the valid messages off the CRQ */
3518 while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
3519 ibmvnic_handle_crq(crq, adapter);
3520 crq->generic.first = 0;
3521 }
3522 vio_enable_interrupts(vdev);
3523 crq = ibmvnic_next_crq(adapter);
3524 if (crq) {
3525 vio_disable_interrupts(vdev);
3526 ibmvnic_handle_crq(crq, adapter);
3527 crq->generic.first = 0;
3528 } else {
Thomas Falcon249168a2017-02-15 12:18:00 -06003529 /* remain in tasklet until all
3530 * capabilities responses are received
3531 */
3532 if (!adapter->wait_capability)
3533 done = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003534 }
3535 }
Thomas Falcon249168a2017-02-15 12:18:00 -06003536 /* if capabilities CRQ's were sent in this tasklet, the following
3537 * tasklet must wait until all responses are received
3538 */
3539 if (atomic_read(&adapter->running_cap_crqs) != 0)
3540 adapter->wait_capability = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003541 spin_unlock_irqrestore(&queue->lock, flags);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003542}
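/*
 * Note on the interrupt/tasklet split above: the hard interrupt handler only
 * disables the VIO interrupt and schedules the tasklet; the tasklet drains
 * the CRQ, re-enables the interrupt and then polls the queue once more so a
 * message arriving between the final poll and vio_enable_interrupts() is not
 * left unprocessed.  If capability requests are still outstanding when a run
 * finishes (running_cap_crqs != 0), wait_capability is set so the next run
 * keeps polling until every capability response has been handled.
 */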
3543
3544static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
3545{
3546 struct vio_dev *vdev = adapter->vdev;
3547 int rc;
3548
3549 do {
3550 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
3551 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
3552
3553 if (rc)
3554 dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
3555
3556 return rc;
3557}
3558
3559static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
3560{
3561 struct ibmvnic_crq_queue *crq = &adapter->crq;
3562 struct device *dev = &adapter->vdev->dev;
3563 struct vio_dev *vdev = adapter->vdev;
3564 int rc;
3565
3566 /* Close the CRQ */
3567 do {
3568 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3569 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3570
3571 /* Clean out the queue */
3572 memset(crq->msgs, 0, PAGE_SIZE);
3573 crq->cur = 0;
3574
3575 /* And re-open it again */
3576 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
3577 crq->msg_token, PAGE_SIZE);
3578
3579 if (rc == H_CLOSED)
3580 /* Adapter is good, but other end is not ready */
3581 dev_warn(dev, "Partner adapter not ready\n");
3582 else if (rc != 0)
3583 dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
3584
3585 return rc;
3586}
3587
3588static void ibmvnic_release_crq_queue(struct ibmvnic_adapter *adapter)
3589{
3590 struct ibmvnic_crq_queue *crq = &adapter->crq;
3591 struct vio_dev *vdev = adapter->vdev;
3592 long rc;
3593
3594 netdev_dbg(adapter->netdev, "Releasing CRQ\n");
3595 free_irq(vdev->irq, adapter);
Thomas Falcon6c267b32017-02-15 12:17:58 -06003596 tasklet_kill(&adapter->tasklet);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003597 do {
3598 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3599 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3600
3601 dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
3602 DMA_BIDIRECTIONAL);
3603 free_page((unsigned long)crq->msgs);
3604}
3605
3606static int ibmvnic_init_crq_queue(struct ibmvnic_adapter *adapter)
3607{
3608 struct ibmvnic_crq_queue *crq = &adapter->crq;
3609 struct device *dev = &adapter->vdev->dev;
3610 struct vio_dev *vdev = adapter->vdev;
3611 int rc, retrc = -ENOMEM;
3612
3613 crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
3614 /* Should we allocate more than one page? */
3615
3616 if (!crq->msgs)
3617 return -ENOMEM;
3618
3619 crq->size = PAGE_SIZE / sizeof(*crq->msgs);
3620 crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
3621 DMA_BIDIRECTIONAL);
3622 if (dma_mapping_error(dev, crq->msg_token))
3623 goto map_failed;
3624
3625 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
3626 crq->msg_token, PAGE_SIZE);
3627
3628 if (rc == H_RESOURCE)
3629 /* maybe kexecing and resource is busy. try a reset */
3630 rc = ibmvnic_reset_crq(adapter);
3631 retrc = rc;
3632
3633 if (rc == H_CLOSED) {
3634 dev_warn(dev, "Partner adapter not ready\n");
3635 } else if (rc) {
3636 dev_warn(dev, "Error %d opening adapter\n", rc);
3637 goto reg_crq_failed;
3638 }
3639
3640 retrc = 0;
3641
Thomas Falcon6c267b32017-02-15 12:17:58 -06003642 tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
3643 (unsigned long)adapter);
3644
Thomas Falcon032c5e82015-12-21 11:26:06 -06003645 netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
3646 rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
3647 adapter);
3648 if (rc) {
3649 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
3650 vdev->irq, rc);
3651 goto req_irq_failed;
3652 }
3653
3654 rc = vio_enable_interrupts(vdev);
3655 if (rc) {
3656 dev_err(dev, "Error %d enabling interrupts\n", rc);
3657 goto req_irq_failed;
3658 }
3659
3660 crq->cur = 0;
3661 spin_lock_init(&crq->lock);
3662
3663 return retrc;
3664
3665req_irq_failed:
Thomas Falcon6c267b32017-02-15 12:17:58 -06003666 tasklet_kill(&adapter->tasklet);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003667 do {
3668 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3669 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3670reg_crq_failed:
3671 dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
3672map_failed:
3673 free_page((unsigned long)crq->msgs);
3674 return retrc;
3675}
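/*
 * CRQ bring-up order used above: allocate one zeroed page for the queue, DMA
 * map it, register it with the hypervisor via H_REG_CRQ (falling back to a
 * CRQ reset if H_RESOURCE suggests a stale registration left over from a
 * kexec), set up the tasklet, and only then request the VIO interrupt and
 * enable it.  Teardown in ibmvnic_release_crq_queue() runs in the reverse
 * order.
 */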
3676
3677/* debugfs for dump */
3678static int ibmvnic_dump_show(struct seq_file *seq, void *v)
3679{
3680 struct net_device *netdev = seq->private;
3681 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3682 struct device *dev = &adapter->vdev->dev;
3683 union ibmvnic_crq crq;
3684
3685 memset(&crq, 0, sizeof(crq));
3686 crq.request_dump_size.first = IBMVNIC_CRQ_CMD;
3687 crq.request_dump_size.cmd = REQUEST_DUMP_SIZE;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003688
3689 init_completion(&adapter->fw_done);
Nathan Fontenotdb5d0b52017-02-10 13:45:05 -05003690 ibmvnic_send_crq(adapter, &crq);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003691 wait_for_completion(&adapter->fw_done);
3692
3693 seq_write(seq, adapter->dump_data, adapter->dump_data_size);
3694
3695 dma_unmap_single(dev, adapter->dump_data_token, adapter->dump_data_size,
3696 DMA_BIDIRECTIONAL);
3697
3698 kfree(adapter->dump_data);
3699
3700 return 0;
3701}
3702
3703static int ibmvnic_dump_open(struct inode *inode, struct file *file)
3704{
3705 return single_open(file, ibmvnic_dump_show, inode->i_private);
3706}
3707
3708static const struct file_operations ibmvnic_dump_ops = {
3709 .owner = THIS_MODULE,
3710 .open = ibmvnic_dump_open,
3711 .read = seq_read,
3712 .llseek = seq_lseek,
3713 .release = single_release,
3714};
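/*
 * The firmware dump is pulled on demand when the debugfs file is read; a
 * minimal usage sketch, assuming debugfs is mounted in the usual place and
 * using an illustrative unit address:
 *
 *   # cat /sys/kernel/debug/ibmvnic_30000002/dump > vnic_fw.dump
 *
 * Each read triggers a REQUEST_DUMP_SIZE/REQUEST_DUMP exchange with the
 * server and streams the returned dump_data buffer to user space.
 */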
3715
Thomas Falcon65dc6892016-07-06 15:35:18 -05003716static void handle_crq_init_rsp(struct work_struct *work)
3717{
3718 struct ibmvnic_adapter *adapter = container_of(work,
3719 struct ibmvnic_adapter,
3720 vnic_crq_init);
3721 struct device *dev = &adapter->vdev->dev;
3722 struct net_device *netdev = adapter->netdev;
3723 unsigned long timeout = msecs_to_jiffies(30000);
Thomas Falcondfad09a2016-08-18 11:37:51 -05003724 bool restart = false;
Thomas Falcon65dc6892016-07-06 15:35:18 -05003725 int rc;
3726
Thomas Falcondfad09a2016-08-18 11:37:51 -05003727 if (adapter->failover) {
3728 release_sub_crqs(adapter);
3729 if (netif_running(netdev)) {
3730 netif_tx_disable(netdev);
3731 ibmvnic_close(netdev);
3732 restart = true;
3733 }
3734 }
3735
Thomas Falcon65dc6892016-07-06 15:35:18 -05003736 reinit_completion(&adapter->init_done);
Nathan Fontenotdb5d0b52017-02-10 13:45:05 -05003737 send_version_xchg(adapter);
Thomas Falcon65dc6892016-07-06 15:35:18 -05003738 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
3739 dev_err(dev, "Passive init timeout\n");
3740 goto task_failed;
3741 }
3742
Thomas Falconf39f0d12017-02-14 10:22:59 -06003743 netdev->mtu = adapter->req_mtu - ETH_HLEN;
Thomas Falcon65dc6892016-07-06 15:35:18 -05003744
Thomas Falcondfad09a2016-08-18 11:37:51 -05003745 if (adapter->failover) {
3746 adapter->failover = false;
3747 if (restart) {
3748 rc = ibmvnic_open(netdev);
3749 if (rc)
3750 goto restart_failed;
3751 }
3752 netif_carrier_on(netdev);
3753 return;
3754 }
3755
Thomas Falcon65dc6892016-07-06 15:35:18 -05003756 rc = register_netdev(netdev);
3757 if (rc) {
3758 dev_err(dev,
3759 "failed to register netdev rc=%d\n", rc);
3760 goto register_failed;
3761 }
3762 dev_info(dev, "ibmvnic registered\n");
3763
3764 return;
3765
Thomas Falcondfad09a2016-08-18 11:37:51 -05003766restart_failed:
3767 dev_err(dev, "Failed to restart ibmvnic, rc=%d\n", rc);
Thomas Falcon65dc6892016-07-06 15:35:18 -05003768register_failed:
3769 release_sub_crqs(adapter);
3770task_failed:
3771 dev_err(dev, "Passive initialization was not successful\n");
3772}
3773
Thomas Falcon032c5e82015-12-21 11:26:06 -06003774static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
3775{
Thomas Falconea22d512016-07-06 15:35:17 -05003776 unsigned long timeout = msecs_to_jiffies(30000);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003777 struct ibmvnic_adapter *adapter;
3778 struct net_device *netdev;
3779 unsigned char *mac_addr_p;
3780 struct dentry *ent;
Thomas Falcone1fac0a2016-11-11 11:00:46 -06003781 char buf[17]; /* debugfs name buf */
Thomas Falcon032c5e82015-12-21 11:26:06 -06003782 int rc;
3783
3784 dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
3785 dev->unit_address);
3786
3787 mac_addr_p = (unsigned char *)vio_get_attribute(dev,
3788 VETH_MAC_ADDR, NULL);
3789 if (!mac_addr_p) {
3790 dev_err(&dev->dev,
3791 "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
3792 __FILE__, __LINE__);
3793 return 0;
3794 }
3795
3796 netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
3797 IBMVNIC_MAX_TX_QUEUES);
3798 if (!netdev)
3799 return -ENOMEM;
3800
3801 adapter = netdev_priv(netdev);
3802 dev_set_drvdata(&dev->dev, netdev);
3803 adapter->vdev = dev;
3804 adapter->netdev = netdev;
Thomas Falcondfad09a2016-08-18 11:37:51 -05003805 adapter->failover = false;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003806
3807 ether_addr_copy(adapter->mac_addr, mac_addr_p);
3808 ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
3809 netdev->irq = dev->irq;
3810 netdev->netdev_ops = &ibmvnic_netdev_ops;
3811 netdev->ethtool_ops = &ibmvnic_ethtool_ops;
3812 SET_NETDEV_DEV(netdev, &dev->dev);
3813
Thomas Falcon65dc6892016-07-06 15:35:18 -05003814 INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);
Thomas Falcon9888d7b2016-10-27 12:28:51 -05003815 INIT_WORK(&adapter->ibmvnic_xport, ibmvnic_xport_event);
Thomas Falcon65dc6892016-07-06 15:35:18 -05003816
Thomas Falcon032c5e82015-12-21 11:26:06 -06003817 spin_lock_init(&adapter->stats_lock);
3818
3819 rc = ibmvnic_init_crq_queue(adapter);
3820 if (rc) {
3821 dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n", rc);
3822 goto free_netdev;
3823 }
3824
3825 INIT_LIST_HEAD(&adapter->errors);
3826 INIT_LIST_HEAD(&adapter->inflight);
3827 spin_lock_init(&adapter->error_list_lock);
3828 spin_lock_init(&adapter->inflight_lock);
3829
3830 adapter->stats_token = dma_map_single(&dev->dev, &adapter->stats,
3831 sizeof(struct ibmvnic_statistics),
3832 DMA_FROM_DEVICE);
3833 if (dma_mapping_error(&dev->dev, adapter->stats_token)) {
3834 if (!firmware_has_feature(FW_FEATURE_CMO))
3835 dev_err(&dev->dev, "Couldn't map stats buffer\n");
Wei Yongjun0e872032016-08-24 13:47:58 +00003836 rc = -ENOMEM;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003837 goto free_crq;
3838 }
3839
3840 snprintf(buf, sizeof(buf), "ibmvnic_%x", dev->unit_address);
3841 ent = debugfs_create_dir(buf, NULL);
3842 if (!ent || IS_ERR(ent)) {
3843 dev_info(&dev->dev, "debugfs create directory failed\n");
3844 adapter->debugfs_dir = NULL;
3845 } else {
3846 adapter->debugfs_dir = ent;
3847 ent = debugfs_create_file("dump", S_IRUGO, adapter->debugfs_dir,
3848 netdev, &ibmvnic_dump_ops);
3849 if (!ent || IS_ERR(ent)) {
3850 dev_info(&dev->dev,
3851 "debugfs create dump file failed\n");
3852 adapter->debugfs_dump = NULL;
3853 } else {
3854 adapter->debugfs_dump = ent;
3855 }
3856 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06003857
3858 init_completion(&adapter->init_done);
Nathan Fontenotdb5d0b52017-02-10 13:45:05 -05003859 ibmvnic_send_crq_init(adapter);
Thomas Falconea22d512016-07-06 15:35:17 -05003860 if (!wait_for_completion_timeout(&adapter->init_done, timeout))
3861 return 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003862
Thomas Falconf39f0d12017-02-14 10:22:59 -06003863 netdev->mtu = adapter->req_mtu - ETH_HLEN;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003864
3865 rc = register_netdev(netdev);
3866 if (rc) {
3867 dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
John Allenbd0b6722017-03-17 17:13:40 -05003868 goto free_debugfs;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003869 }
3870 dev_info(&dev->dev, "ibmvnic registered\n");
3871
3872 return 0;
3873
Thomas Falcon032c5e82015-12-21 11:26:06 -06003874free_debugfs:
3875 if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
3876 debugfs_remove_recursive(adapter->debugfs_dir);
3877free_crq:
3878 ibmvnic_release_crq_queue(adapter);
3879free_netdev:
3880 free_netdev(netdev);
3881 return rc;
3882}
3883
3884static int ibmvnic_remove(struct vio_dev *dev)
3885{
3886 struct net_device *netdev = dev_get_drvdata(&dev->dev);
3887 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3888
3889 unregister_netdev(netdev);
3890
3891 release_sub_crqs(adapter);
3892
3893 ibmvnic_release_crq_queue(adapter);
3894
3895 if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
3896 debugfs_remove_recursive(adapter->debugfs_dir);
3897
Thomas Falconb7f193d2016-11-11 11:00:45 -06003898 dma_unmap_single(&dev->dev, adapter->stats_token,
3899 sizeof(struct ibmvnic_statistics), DMA_FROM_DEVICE);
3900
Thomas Falcon032c5e82015-12-21 11:26:06 -06003901 if (adapter->ras_comps)
3902 dma_free_coherent(&dev->dev,
3903 adapter->ras_comp_num *
3904 sizeof(struct ibmvnic_fw_component),
3905 adapter->ras_comps, adapter->ras_comps_tok);
3906
3907 kfree(adapter->ras_comp_int);
3908
3909 free_netdev(netdev);
3910 dev_set_drvdata(&dev->dev, NULL);
3911
3912 return 0;
3913}
3914
3915static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
3916{
3917 struct net_device *netdev = dev_get_drvdata(&vdev->dev);
3918 struct ibmvnic_adapter *adapter;
3919 struct iommu_table *tbl;
3920 unsigned long ret = 0;
3921 int i;
3922
3923 tbl = get_iommu_table_base(&vdev->dev);
3924
	/* netdev inits at probe time along with the structures we need below */
3926 if (!netdev)
3927 return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);
3928
3929 adapter = netdev_priv(netdev);
3930
3931 ret += PAGE_SIZE; /* the crq message queue */
3932 ret += adapter->bounce_buffer_size;
3933 ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
3934
3935 for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
3936 ret += 4 * PAGE_SIZE; /* the scrq message queue */
3937
3938 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
3939 i++)
3940 ret += adapter->rx_pool[i].size *
3941 IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
3942
3943 return ret;
3944}
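/*
 * A worked example of the estimate above, purely illustrative (real values
 * depend on the negotiated queue counts, pool sizes and IOMMU page size):
 * with 4 KiB pages and IOMMU pages, 4 TX + 4 RX queues, and a single RX pool
 * of 1024 buffers of 2 KiB (each rounded up to one IOMMU page), the request
 * comes to roughly
 *
 *   4 KiB (CRQ) + bounce_buffer_size + 4 KiB (stats)
 *   + 8 * 16 KiB (sub-CRQs) + 1024 * 4 KiB (RX buffers)
 *   = ~4.1 MiB + bounce_buffer_size
 */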
3945
3946static int ibmvnic_resume(struct device *dev)
3947{
3948 struct net_device *netdev = dev_get_drvdata(dev);
3949 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3950 int i;
3951
3952 /* kick the interrupt handlers just in case we lost an interrupt */
3953 for (i = 0; i < adapter->req_rx_queues; i++)
3954 ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
3955 adapter->rx_scrq[i]);
3956
3957 return 0;
3958}
3959
3960static struct vio_device_id ibmvnic_device_table[] = {
3961 {"network", "IBM,vnic"},
3962 {"", "" }
3963};
3964MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
3965
3966static const struct dev_pm_ops ibmvnic_pm_ops = {
3967 .resume = ibmvnic_resume
3968};
3969
3970static struct vio_driver ibmvnic_driver = {
3971 .id_table = ibmvnic_device_table,
3972 .probe = ibmvnic_probe,
3973 .remove = ibmvnic_remove,
3974 .get_desired_dma = ibmvnic_get_desired_dma,
3975 .name = ibmvnic_driver_name,
3976 .pm = &ibmvnic_pm_ops,
3977};
3978
3979/* module functions */
3980static int __init ibmvnic_module_init(void)
3981{
3982 pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
3983 IBMVNIC_DRIVER_VERSION);
3984
3985 return vio_register_driver(&ibmvnic_driver);
3986}
3987
3988static void __exit ibmvnic_module_exit(void)
3989{
3990 vio_unregister_driver(&ibmvnic_driver);
3991}
3992
3993module_init(ibmvnic_module_init);
3994module_exit(ibmvnic_module_exit);