Kevin Barnett6c223762016-06-27 16:41:00 -05001/*
2 * driver for Microsemi PQI-based storage controllers
3 * Copyright (c) 2016 Microsemi Corporation
4 * Copyright (c) 2016 PMC-Sierra, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
16 *
17 */
18
19#include <linux/module.h>
20#include <linux/kernel.h>
21#include <linux/pci.h>
22#include <linux/delay.h>
23#include <linux/interrupt.h>
24#include <linux/sched.h>
25#include <linux/rtc.h>
26#include <linux/bcd.h>
27#include <linux/cciss_ioctl.h>
Christoph Hellwig52198222016-11-01 08:12:49 -060028#include <linux/blk-mq-pci.h>
Kevin Barnett6c223762016-06-27 16:41:00 -050029#include <scsi/scsi_host.h>
30#include <scsi/scsi_cmnd.h>
31#include <scsi/scsi_device.h>
32#include <scsi/scsi_eh.h>
33#include <scsi/scsi_transport_sas.h>
34#include <asm/unaligned.h>
35#include "smartpqi.h"
36#include "smartpqi_sis.h"
37
38#if !defined(BUILD_TIMESTAMP)
39#define BUILD_TIMESTAMP
40#endif
41
Kevin Barnett699bed72016-08-31 14:55:36 -050042#define DRIVER_VERSION "0.9.13-370"
Kevin Barnett6c223762016-06-27 16:41:00 -050043#define DRIVER_MAJOR 0
44#define DRIVER_MINOR 9
Kevin Barnett699bed72016-08-31 14:55:36 -050045#define DRIVER_RELEASE 13
46#define DRIVER_REVISION 370
Kevin Barnett6c223762016-06-27 16:41:00 -050047
48#define DRIVER_NAME "Microsemi PQI Driver (v" DRIVER_VERSION ")"
49#define DRIVER_NAME_SHORT "smartpqi"
50
51MODULE_AUTHOR("Microsemi");
52MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
53 DRIVER_VERSION);
54MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
55MODULE_VERSION(DRIVER_VERSION);
56MODULE_LICENSE("GPL");
57
58#define PQI_ENABLE_MULTI_QUEUE_SUPPORT 0
59
60static char *hpe_branded_controller = "HPE Smart Array Controller";
61static char *microsemi_branded_controller = "Microsemi Smart Family Controller";
62
63static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
64static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
65static void pqi_scan_start(struct Scsi_Host *shost);
66static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
67 struct pqi_queue_group *queue_group, enum pqi_io_path path,
68 struct pqi_io_request *io_request);
69static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
70 struct pqi_iu_header *request, unsigned int flags,
71 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
72static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
73 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
74 unsigned int cdb_length, struct pqi_queue_group *queue_group,
75 struct pqi_encryption_info *encryption_info);
76
77/* for flags argument to pqi_submit_raid_request_synchronous() */
78#define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1
79
80static struct scsi_transport_template *pqi_sas_transport_template;
81
82static atomic_t pqi_controller_count = ATOMIC_INIT(0);
83
Kevin Barnett6a50d6a2017-05-03 18:52:52 -050084static unsigned int pqi_supported_event_types[] = {
85 PQI_EVENT_TYPE_HOTPLUG,
86 PQI_EVENT_TYPE_HARDWARE,
87 PQI_EVENT_TYPE_PHYSICAL_DEVICE,
88 PQI_EVENT_TYPE_LOGICAL_DEVICE,
89 PQI_EVENT_TYPE_AIO_STATE_CHANGE,
90 PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
91};
92
Kevin Barnett6c223762016-06-27 16:41:00 -050093static int pqi_disable_device_id_wildcards;
94module_param_named(disable_device_id_wildcards,
95 pqi_disable_device_id_wildcards, int, S_IRUGO | S_IWUSR);
96MODULE_PARM_DESC(disable_device_id_wildcards,
97 "Disable device ID wildcards.");
98
99static char *raid_levels[] = {
100 "RAID-0",
101 "RAID-4",
102 "RAID-1(1+0)",
103 "RAID-5",
104 "RAID-5+1",
105 "RAID-ADG",
106 "RAID-1(ADM)",
107};
108
109static char *pqi_raid_level_to_string(u8 raid_level)
110{
111 if (raid_level < ARRAY_SIZE(raid_levels))
112 return raid_levels[raid_level];
113
114 return "";
115}
116
117#define SA_RAID_0 0
118#define SA_RAID_4 1
119#define SA_RAID_1 2 /* also used for RAID 10 */
120#define SA_RAID_5 3 /* also used for RAID 50 */
121#define SA_RAID_51 4
122#define SA_RAID_6 5 /* also used for RAID 60 */
123#define SA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
124#define SA_RAID_MAX SA_RAID_ADM
125#define SA_RAID_UNKNOWN 0xff
126
127static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
128{
Kevin Barnett7561a7e2017-05-03 18:52:58 -0500129 pqi_prep_for_scsi_done(scmd);
Kevin Barnett6c223762016-06-27 16:41:00 -0500130 scmd->scsi_done(scmd);
131}
132
133static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
134{
135 return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
136}
137
138static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
139{
140 void *hostdata = shost_priv(shost);
141
142 return *((struct pqi_ctrl_info **)hostdata);
143}
144
145static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
146{
147 return !device->is_physical_device;
148}
149
150static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
151{
152 return !ctrl_info->controller_online;
153}
154
155static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
156{
157 if (ctrl_info->controller_online)
158 if (!sis_is_firmware_running(ctrl_info))
159 pqi_take_ctrl_offline(ctrl_info);
160}
161
162static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
163{
164 return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
165}
166
Kevin Barnettff6abb72016-08-31 14:54:41 -0500167static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
168 struct pqi_ctrl_info *ctrl_info)
169{
170 return sis_read_driver_scratch(ctrl_info);
171}
172
173static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
174 enum pqi_ctrl_mode mode)
175{
176 sis_write_driver_scratch(ctrl_info, mode);
177}
178
Kevin Barnett7561a7e2017-05-03 18:52:58 -0500179#define PQI_RESCAN_WORK_INTERVAL (10 * HZ)
180static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
181{
182 ctrl_info->block_requests = true;
183 scsi_block_requests(ctrl_info->scsi_host);
184}
185
186static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
187{
188 ctrl_info->block_requests = false;
189 wake_up_all(&ctrl_info->block_requests_wait);
190 scsi_unblock_requests(ctrl_info->scsi_host);
191}
192
193static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
194{
195 return ctrl_info->block_requests;
196}
197
198static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
199 unsigned long timeout_msecs)
200{
201 unsigned long remaining_msecs;
202
203 if (!pqi_ctrl_blocked(ctrl_info))
204 return timeout_msecs;
205
206 atomic_inc(&ctrl_info->num_blocked_threads);
207
208 if (timeout_msecs == NO_TIMEOUT) {
209 wait_event(ctrl_info->block_requests_wait,
210 !pqi_ctrl_blocked(ctrl_info));
211 remaining_msecs = timeout_msecs;
212 } else {
213 unsigned long remaining_jiffies;
214
215 remaining_jiffies =
216 wait_event_timeout(ctrl_info->block_requests_wait,
217 !pqi_ctrl_blocked(ctrl_info),
218 msecs_to_jiffies(timeout_msecs));
219 remaining_msecs = jiffies_to_msecs(remaining_jiffies);
220 }
221
222 atomic_dec(&ctrl_info->num_blocked_threads);
223
224 return remaining_msecs;
225}
226
227static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
228{
229 atomic_inc(&ctrl_info->num_busy_threads);
230}
231
232static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
233{
234 atomic_dec(&ctrl_info->num_busy_threads);
235}
236
237static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
238{
239 while (atomic_read(&ctrl_info->num_busy_threads) >
240 atomic_read(&ctrl_info->num_blocked_threads))
241 usleep_range(1000, 2000);
242}
243
244static inline void pqi_device_reset_start(struct pqi_scsi_dev *device)
245{
246 device->in_reset = true;
247}
248
249static inline void pqi_device_reset_done(struct pqi_scsi_dev *device)
250{
251 device->in_reset = false;
252}
253
254static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
255{
256 return device->in_reset;
257}
Kevin Barnett6c223762016-06-27 16:41:00 -0500258
259static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
260{
261 schedule_delayed_work(&ctrl_info->rescan_work,
262 PQI_RESCAN_WORK_INTERVAL);
263}
264
Kevin Barnett061ef062017-05-03 18:53:05 -0500265static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
266{
267 cancel_delayed_work_sync(&ctrl_info->rescan_work);
268}
269
Kevin Barnett6c223762016-06-27 16:41:00 -0500270static int pqi_map_single(struct pci_dev *pci_dev,
271 struct pqi_sg_descriptor *sg_descriptor, void *buffer,
272 size_t buffer_length, int data_direction)
273{
274 dma_addr_t bus_address;
275
276 if (!buffer || buffer_length == 0 || data_direction == PCI_DMA_NONE)
277 return 0;
278
279 bus_address = pci_map_single(pci_dev, buffer, buffer_length,
280 data_direction);
281 if (pci_dma_mapping_error(pci_dev, bus_address))
282 return -ENOMEM;
283
284 put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
285 put_unaligned_le32(buffer_length, &sg_descriptor->length);
286 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
287
288 return 0;
289}
290
291static void pqi_pci_unmap(struct pci_dev *pci_dev,
292 struct pqi_sg_descriptor *descriptors, int num_descriptors,
293 int data_direction)
294{
295 int i;
296
297 if (data_direction == PCI_DMA_NONE)
298 return;
299
300 for (i = 0; i < num_descriptors; i++)
301 pci_unmap_single(pci_dev,
302 (dma_addr_t)get_unaligned_le64(&descriptors[i].address),
303 get_unaligned_le32(&descriptors[i].length),
304 data_direction);
305}
306
307static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
308 struct pqi_raid_path_request *request, u8 cmd,
309 u8 *scsi3addr, void *buffer, size_t buffer_length,
310 u16 vpd_page, int *pci_direction)
311{
312 u8 *cdb;
313 int pci_dir;
314
315 memset(request, 0, sizeof(*request));
316
317 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
318 put_unaligned_le16(offsetof(struct pqi_raid_path_request,
319 sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
320 &request->header.iu_length);
321 put_unaligned_le32(buffer_length, &request->buffer_length);
322 memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
323 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
324 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
325
326 cdb = request->cdb;
327
328 switch (cmd) {
329 case INQUIRY:
330 request->data_direction = SOP_READ_FLAG;
331 cdb[0] = INQUIRY;
332 if (vpd_page & VPD_PAGE) {
333 cdb[1] = 0x1;
334 cdb[2] = (u8)vpd_page;
335 }
336 cdb[4] = (u8)buffer_length;
337 break;
338 case CISS_REPORT_LOG:
339 case CISS_REPORT_PHYS:
340 request->data_direction = SOP_READ_FLAG;
341 cdb[0] = cmd;
342 if (cmd == CISS_REPORT_PHYS)
343 cdb[1] = CISS_REPORT_PHYS_EXTENDED;
344 else
345 cdb[1] = CISS_REPORT_LOG_EXTENDED;
346 put_unaligned_be32(buffer_length, &cdb[6]);
347 break;
348 case CISS_GET_RAID_MAP:
349 request->data_direction = SOP_READ_FLAG;
350 cdb[0] = CISS_READ;
351 cdb[1] = CISS_GET_RAID_MAP;
352 put_unaligned_be32(buffer_length, &cdb[6]);
353 break;
354 case SA_CACHE_FLUSH:
355 request->data_direction = SOP_WRITE_FLAG;
356 cdb[0] = BMIC_WRITE;
357 cdb[6] = BMIC_CACHE_FLUSH;
358 put_unaligned_be16(buffer_length, &cdb[7]);
359 break;
360 case BMIC_IDENTIFY_CONTROLLER:
361 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
362 request->data_direction = SOP_READ_FLAG;
363 cdb[0] = BMIC_READ;
364 cdb[6] = cmd;
365 put_unaligned_be16(buffer_length, &cdb[7]);
366 break;
367 case BMIC_WRITE_HOST_WELLNESS:
368 request->data_direction = SOP_WRITE_FLAG;
369 cdb[0] = BMIC_WRITE;
370 cdb[6] = cmd;
371 put_unaligned_be16(buffer_length, &cdb[7]);
372 break;
373 default:
 374		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n",
375 cmd);
376 WARN_ON(cmd);
377 break;
378 }
379
380 switch (request->data_direction) {
381 case SOP_READ_FLAG:
382 pci_dir = PCI_DMA_FROMDEVICE;
383 break;
384 case SOP_WRITE_FLAG:
385 pci_dir = PCI_DMA_TODEVICE;
386 break;
387 case SOP_NO_DIRECTION_FLAG:
388 pci_dir = PCI_DMA_NONE;
389 break;
390 default:
391 pci_dir = PCI_DMA_BIDIRECTIONAL;
392 break;
393 }
394
395 *pci_direction = pci_dir;
396
397 return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
398 buffer, buffer_length, pci_dir);
399}
400
401static struct pqi_io_request *pqi_alloc_io_request(
402 struct pqi_ctrl_info *ctrl_info)
403{
404 struct pqi_io_request *io_request;
405 u16 i = ctrl_info->next_io_request_slot; /* benignly racy */
406
407 while (1) {
408 io_request = &ctrl_info->io_request_pool[i];
409 if (atomic_inc_return(&io_request->refcount) == 1)
410 break;
411 atomic_dec(&io_request->refcount);
412 i = (i + 1) % ctrl_info->max_io_slots;
413 }
414
415 /* benignly racy */
416 ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;
417
418 io_request->scmd = NULL;
419 io_request->status = 0;
420 io_request->error_info = NULL;
421
422 return io_request;
423}
424
425static void pqi_free_io_request(struct pqi_io_request *io_request)
426{
427 atomic_dec(&io_request->refcount);
428}
429
430static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
431 struct bmic_identify_controller *buffer)
432{
433 int rc;
434 int pci_direction;
435 struct pqi_raid_path_request request;
436
437 rc = pqi_build_raid_path_request(ctrl_info, &request,
438 BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
439 sizeof(*buffer), 0, &pci_direction);
440 if (rc)
441 return rc;
442
443 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
444 NULL, NO_TIMEOUT);
445
446 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
447 pci_direction);
448
449 return rc;
450}
451
452static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
453 u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
454{
455 int rc;
456 int pci_direction;
457 struct pqi_raid_path_request request;
458
459 rc = pqi_build_raid_path_request(ctrl_info, &request,
460 INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
461 &pci_direction);
462 if (rc)
463 return rc;
464
465 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
466 NULL, NO_TIMEOUT);
467
468 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
469 pci_direction);
470
471 return rc;
472}
473
474static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
475 struct pqi_scsi_dev *device,
476 struct bmic_identify_physical_device *buffer,
477 size_t buffer_length)
478{
479 int rc;
480 int pci_direction;
481 u16 bmic_device_index;
482 struct pqi_raid_path_request request;
483
484 rc = pqi_build_raid_path_request(ctrl_info, &request,
485 BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
486 buffer_length, 0, &pci_direction);
487 if (rc)
488 return rc;
489
490 bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
491 request.cdb[2] = (u8)bmic_device_index;
492 request.cdb[9] = (u8)(bmic_device_index >> 8);
493
494 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
495 0, NULL, NO_TIMEOUT);
496
497 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
498 pci_direction);
499
500 return rc;
501}
502
503#define SA_CACHE_FLUSH_BUFFER_LENGTH 4
Kevin Barnett6c223762016-06-27 16:41:00 -0500504
505static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info)
506{
507 int rc;
508 struct pqi_raid_path_request request;
509 int pci_direction;
510 u8 *buffer;
511
512 /*
513 * Don't bother trying to flush the cache if the controller is
514 * locked up.
515 */
516 if (pqi_ctrl_offline(ctrl_info))
517 return -ENXIO;
518
519 buffer = kzalloc(SA_CACHE_FLUSH_BUFFER_LENGTH, GFP_KERNEL);
520 if (!buffer)
521 return -ENOMEM;
522
523 rc = pqi_build_raid_path_request(ctrl_info, &request,
524 SA_CACHE_FLUSH, RAID_CTLR_LUNID, buffer,
525 SA_CACHE_FLUSH_BUFFER_LENGTH, 0, &pci_direction);
526 if (rc)
527 goto out;
528
529 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
Kevin Barnettd48f8fa2016-08-31 14:55:17 -0500530 0, NULL, NO_TIMEOUT);
Kevin Barnett6c223762016-06-27 16:41:00 -0500531
532 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
533 pci_direction);
534
535out:
536 kfree(buffer);
537
538 return rc;
539}
540
541static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
542 void *buffer, size_t buffer_length)
543{
544 int rc;
545 struct pqi_raid_path_request request;
546 int pci_direction;
547
548 rc = pqi_build_raid_path_request(ctrl_info, &request,
549 BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
550 buffer_length, 0, &pci_direction);
551 if (rc)
552 return rc;
553
554 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
555 0, NULL, NO_TIMEOUT);
556
557 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
558 pci_direction);
559
560 return rc;
561}
562
563#pragma pack(1)
564
565struct bmic_host_wellness_driver_version {
566 u8 start_tag[4];
567 u8 driver_version_tag[2];
568 __le16 driver_version_length;
569 char driver_version[32];
570 u8 end_tag[2];
571};
572
573#pragma pack()
574
575static int pqi_write_driver_version_to_host_wellness(
576 struct pqi_ctrl_info *ctrl_info)
577{
578 int rc;
579 struct bmic_host_wellness_driver_version *buffer;
580 size_t buffer_length;
581
582 buffer_length = sizeof(*buffer);
583
584 buffer = kmalloc(buffer_length, GFP_KERNEL);
585 if (!buffer)
586 return -ENOMEM;
587
588 buffer->start_tag[0] = '<';
589 buffer->start_tag[1] = 'H';
590 buffer->start_tag[2] = 'W';
591 buffer->start_tag[3] = '>';
592 buffer->driver_version_tag[0] = 'D';
593 buffer->driver_version_tag[1] = 'V';
594 put_unaligned_le16(sizeof(buffer->driver_version),
595 &buffer->driver_version_length);
Kevin Barnett061ef062017-05-03 18:53:05 -0500596 strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
Kevin Barnett6c223762016-06-27 16:41:00 -0500597 sizeof(buffer->driver_version) - 1);
598 buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
599 buffer->end_tag[0] = 'Z';
600 buffer->end_tag[1] = 'Z';
601
602 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
603
604 kfree(buffer);
605
606 return rc;
607}
608
609#pragma pack(1)
610
611struct bmic_host_wellness_time {
612 u8 start_tag[4];
613 u8 time_tag[2];
614 __le16 time_length;
615 u8 time[8];
616 u8 dont_write_tag[2];
617 u8 end_tag[2];
618};
619
620#pragma pack()
621
622static int pqi_write_current_time_to_host_wellness(
623 struct pqi_ctrl_info *ctrl_info)
624{
625 int rc;
626 struct bmic_host_wellness_time *buffer;
627 size_t buffer_length;
628 time64_t local_time;
629 unsigned int year;
Arnd Bergmanned108582017-02-17 16:03:52 +0100630 struct tm tm;
Kevin Barnett6c223762016-06-27 16:41:00 -0500631
632 buffer_length = sizeof(*buffer);
633
634 buffer = kmalloc(buffer_length, GFP_KERNEL);
635 if (!buffer)
636 return -ENOMEM;
637
638 buffer->start_tag[0] = '<';
639 buffer->start_tag[1] = 'H';
640 buffer->start_tag[2] = 'W';
641 buffer->start_tag[3] = '>';
642 buffer->time_tag[0] = 'T';
643 buffer->time_tag[1] = 'D';
644 put_unaligned_le16(sizeof(buffer->time),
645 &buffer->time_length);
646
Arnd Bergmanned108582017-02-17 16:03:52 +0100647 local_time = ktime_get_real_seconds();
648 time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
Kevin Barnett6c223762016-06-27 16:41:00 -0500649 year = tm.tm_year + 1900;
650
651 buffer->time[0] = bin2bcd(tm.tm_hour);
652 buffer->time[1] = bin2bcd(tm.tm_min);
653 buffer->time[2] = bin2bcd(tm.tm_sec);
654 buffer->time[3] = 0;
655 buffer->time[4] = bin2bcd(tm.tm_mon + 1);
656 buffer->time[5] = bin2bcd(tm.tm_mday);
657 buffer->time[6] = bin2bcd(year / 100);
658 buffer->time[7] = bin2bcd(year % 100);
659
660 buffer->dont_write_tag[0] = 'D';
661 buffer->dont_write_tag[1] = 'W';
662 buffer->end_tag[0] = 'Z';
663 buffer->end_tag[1] = 'Z';
664
665 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
666
667 kfree(buffer);
668
669 return rc;
670}
671
672#define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * HZ)
673
674static void pqi_update_time_worker(struct work_struct *work)
675{
676 int rc;
677 struct pqi_ctrl_info *ctrl_info;
678
679 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
680 update_time_work);
681
Kevin Barnett6c223762016-06-27 16:41:00 -0500682 rc = pqi_write_current_time_to_host_wellness(ctrl_info);
683 if (rc)
684 dev_warn(&ctrl_info->pci_dev->dev,
685 "error updating time on controller\n");
686
687 schedule_delayed_work(&ctrl_info->update_time_work,
688 PQI_UPDATE_TIME_WORK_INTERVAL);
689}
690
691static inline void pqi_schedule_update_time_worker(
Kevin Barnett4fbebf12016-08-31 14:55:05 -0500692 struct pqi_ctrl_info *ctrl_info)
Kevin Barnett6c223762016-06-27 16:41:00 -0500693{
Kevin Barnett061ef062017-05-03 18:53:05 -0500694 if (ctrl_info->update_time_worker_scheduled)
695 return;
696
Kevin Barnett4fbebf12016-08-31 14:55:05 -0500697 schedule_delayed_work(&ctrl_info->update_time_work, 0);
Kevin Barnett061ef062017-05-03 18:53:05 -0500698 ctrl_info->update_time_worker_scheduled = true;
699}
700
701static inline void pqi_cancel_update_time_worker(
702 struct pqi_ctrl_info *ctrl_info)
703{
704 if (!ctrl_info->update_time_worker_scheduled)
705 return;
706
707 cancel_delayed_work_sync(&ctrl_info->update_time_work);
708 ctrl_info->update_time_worker_scheduled = false;
Kevin Barnett6c223762016-06-27 16:41:00 -0500709}
710
711static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
712 void *buffer, size_t buffer_length)
713{
714 int rc;
715 int pci_direction;
716 struct pqi_raid_path_request request;
717
718 rc = pqi_build_raid_path_request(ctrl_info, &request,
719 cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction);
720 if (rc)
721 return rc;
722
723 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
724 NULL, NO_TIMEOUT);
725
726 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
727 pci_direction);
728
729 return rc;
730}
731
732static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
733 void **buffer)
734{
735 int rc;
736 size_t lun_list_length;
737 size_t lun_data_length;
738 size_t new_lun_list_length;
739 void *lun_data = NULL;
740 struct report_lun_header *report_lun_header;
741
742 report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
743 if (!report_lun_header) {
744 rc = -ENOMEM;
745 goto out;
746 }
747
748 rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
749 sizeof(*report_lun_header));
750 if (rc)
751 goto out;
752
753 lun_list_length = get_unaligned_be32(&report_lun_header->list_length);
754
755again:
756 lun_data_length = sizeof(struct report_lun_header) + lun_list_length;
757
758 lun_data = kmalloc(lun_data_length, GFP_KERNEL);
759 if (!lun_data) {
760 rc = -ENOMEM;
761 goto out;
762 }
763
764 if (lun_list_length == 0) {
765 memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
766 goto out;
767 }
768
769 rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
770 if (rc)
771 goto out;
772
773 new_lun_list_length = get_unaligned_be32(
774 &((struct report_lun_header *)lun_data)->list_length);
775
776 if (new_lun_list_length > lun_list_length) {
777 lun_list_length = new_lun_list_length;
778 kfree(lun_data);
779 goto again;
780 }
781
782out:
783 kfree(report_lun_header);
784
785 if (rc) {
786 kfree(lun_data);
787 lun_data = NULL;
788 }
789
790 *buffer = lun_data;
791
792 return rc;
793}
794
795static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
796 void **buffer)
797{
798 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
799 buffer);
800}
801
802static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
803 void **buffer)
804{
805 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
806}
807
808static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
809 struct report_phys_lun_extended **physdev_list,
810 struct report_log_lun_extended **logdev_list)
811{
812 int rc;
813 size_t logdev_list_length;
814 size_t logdev_data_length;
815 struct report_log_lun_extended *internal_logdev_list;
816 struct report_log_lun_extended *logdev_data;
817 struct report_lun_header report_lun_header;
818
819 rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
820 if (rc)
821 dev_err(&ctrl_info->pci_dev->dev,
822 "report physical LUNs failed\n");
823
824 rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
825 if (rc)
826 dev_err(&ctrl_info->pci_dev->dev,
827 "report logical LUNs failed\n");
828
829 /*
830 * Tack the controller itself onto the end of the logical device list.
831 */
832
833 logdev_data = *logdev_list;
834
835 if (logdev_data) {
836 logdev_list_length =
837 get_unaligned_be32(&logdev_data->header.list_length);
838 } else {
839 memset(&report_lun_header, 0, sizeof(report_lun_header));
840 logdev_data =
841 (struct report_log_lun_extended *)&report_lun_header;
842 logdev_list_length = 0;
843 }
844
845 logdev_data_length = sizeof(struct report_lun_header) +
846 logdev_list_length;
847
848 internal_logdev_list = kmalloc(logdev_data_length +
849 sizeof(struct report_log_lun_extended), GFP_KERNEL);
850 if (!internal_logdev_list) {
851 kfree(*logdev_list);
852 *logdev_list = NULL;
853 return -ENOMEM;
854 }
855
856 memcpy(internal_logdev_list, logdev_data, logdev_data_length);
857 memset((u8 *)internal_logdev_list + logdev_data_length, 0,
858 sizeof(struct report_log_lun_extended_entry));
859 put_unaligned_be32(logdev_list_length +
860 sizeof(struct report_log_lun_extended_entry),
861 &internal_logdev_list->header.list_length);
862
863 kfree(*logdev_list);
864 *logdev_list = internal_logdev_list;
865
866 return 0;
867}
868
869static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
870 int bus, int target, int lun)
871{
872 device->bus = bus;
873 device->target = target;
874 device->lun = lun;
875}
876
877static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
878{
879 u8 *scsi3addr;
880 u32 lunid;
881
882 scsi3addr = device->scsi3addr;
883 lunid = get_unaligned_le32(scsi3addr);
884
885 if (pqi_is_hba_lunid(scsi3addr)) {
886 /* The specified device is the controller. */
887 pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
888 device->target_lun_valid = true;
889 return;
890 }
891
892 if (pqi_is_logical_device(device)) {
893 pqi_set_bus_target_lun(device, PQI_RAID_VOLUME_BUS, 0,
894 lunid & 0x3fff);
895 device->target_lun_valid = true;
896 return;
897 }
898
899 /*
900 * Defer target and LUN assignment for non-controller physical devices
901 * because the SAS transport layer will make these assignments later.
902 */
903 pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
904}
905
906static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
907 struct pqi_scsi_dev *device)
908{
909 int rc;
910 u8 raid_level;
911 u8 *buffer;
912
913 raid_level = SA_RAID_UNKNOWN;
914
915 buffer = kmalloc(64, GFP_KERNEL);
916 if (buffer) {
917 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
918 VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
919 if (rc == 0) {
920 raid_level = buffer[8];
921 if (raid_level > SA_RAID_MAX)
922 raid_level = SA_RAID_UNKNOWN;
923 }
924 kfree(buffer);
925 }
926
927 device->raid_level = raid_level;
928}
929
930static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
931 struct pqi_scsi_dev *device, struct raid_map *raid_map)
932{
933 char *err_msg;
934 u32 raid_map_size;
935 u32 r5or6_blocks_per_row;
936 unsigned int num_phys_disks;
937 unsigned int num_raid_map_entries;
938
939 raid_map_size = get_unaligned_le32(&raid_map->structure_size);
940
941 if (raid_map_size < offsetof(struct raid_map, disk_data)) {
942 err_msg = "RAID map too small";
943 goto bad_raid_map;
944 }
945
946 if (raid_map_size > sizeof(*raid_map)) {
947 err_msg = "RAID map too large";
948 goto bad_raid_map;
949 }
950
951 num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
952 (get_unaligned_le16(&raid_map->data_disks_per_row) +
953 get_unaligned_le16(&raid_map->metadata_disks_per_row));
954 num_raid_map_entries = num_phys_disks *
955 get_unaligned_le16(&raid_map->row_cnt);
956
957 if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) {
958 err_msg = "invalid number of map entries in RAID map";
959 goto bad_raid_map;
960 }
961
962 if (device->raid_level == SA_RAID_1) {
963 if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
964 err_msg = "invalid RAID-1 map";
965 goto bad_raid_map;
966 }
967 } else if (device->raid_level == SA_RAID_ADM) {
968 if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
969 err_msg = "invalid RAID-1(ADM) map";
970 goto bad_raid_map;
971 }
972 } else if ((device->raid_level == SA_RAID_5 ||
973 device->raid_level == SA_RAID_6) &&
974 get_unaligned_le16(&raid_map->layout_map_count) > 1) {
975 /* RAID 50/60 */
976 r5or6_blocks_per_row =
977 get_unaligned_le16(&raid_map->strip_size) *
978 get_unaligned_le16(&raid_map->data_disks_per_row);
979 if (r5or6_blocks_per_row == 0) {
980 err_msg = "invalid RAID-5 or RAID-6 map";
981 goto bad_raid_map;
982 }
983 }
984
985 return 0;
986
987bad_raid_map:
988 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", err_msg);
989
990 return -EINVAL;
991}
992
993static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
994 struct pqi_scsi_dev *device)
995{
996 int rc;
997 int pci_direction;
998 struct pqi_raid_path_request request;
999 struct raid_map *raid_map;
1000
1001 raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
1002 if (!raid_map)
1003 return -ENOMEM;
1004
1005 rc = pqi_build_raid_path_request(ctrl_info, &request,
1006 CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
1007 sizeof(*raid_map), 0, &pci_direction);
1008 if (rc)
1009 goto error;
1010
1011 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
1012 NULL, NO_TIMEOUT);
1013
1014 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
1015 pci_direction);
1016
1017 if (rc)
1018 goto error;
1019
1020 rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
1021 if (rc)
1022 goto error;
1023
1024 device->raid_map = raid_map;
1025
1026 return 0;
1027
1028error:
1029 kfree(raid_map);
1030
1031 return rc;
1032}
1033
1034static void pqi_get_offload_status(struct pqi_ctrl_info *ctrl_info,
1035 struct pqi_scsi_dev *device)
1036{
1037 int rc;
1038 u8 *buffer;
1039 u8 offload_status;
1040
1041 buffer = kmalloc(64, GFP_KERNEL);
1042 if (!buffer)
1043 return;
1044
1045 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1046 VPD_PAGE | CISS_VPD_LV_OFFLOAD_STATUS, buffer, 64);
1047 if (rc)
1048 goto out;
1049
1050#define OFFLOAD_STATUS_BYTE 4
1051#define OFFLOAD_CONFIGURED_BIT 0x1
1052#define OFFLOAD_ENABLED_BIT 0x2
1053
1054 offload_status = buffer[OFFLOAD_STATUS_BYTE];
1055 device->offload_configured =
1056 !!(offload_status & OFFLOAD_CONFIGURED_BIT);
1057 if (device->offload_configured) {
1058 device->offload_enabled_pending =
1059 !!(offload_status & OFFLOAD_ENABLED_BIT);
1060 if (pqi_get_raid_map(ctrl_info, device))
1061 device->offload_enabled_pending = false;
1062 }
1063
1064out:
1065 kfree(buffer);
1066}
1067
1068/*
1069 * Use vendor-specific VPD to determine online/offline status of a volume.
1070 */
1071
1072static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
1073 struct pqi_scsi_dev *device)
1074{
1075 int rc;
1076 size_t page_length;
1077 u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
1078 bool volume_offline = true;
1079 u32 volume_flags;
1080 struct ciss_vpd_logical_volume_status *vpd;
1081
1082 vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
1083 if (!vpd)
1084 goto no_buffer;
1085
1086 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1087 VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
1088 if (rc)
1089 goto out;
1090
1091 page_length = offsetof(struct ciss_vpd_logical_volume_status,
1092 volume_status) + vpd->page_length;
1093 if (page_length < sizeof(*vpd))
1094 goto out;
1095
1096 volume_status = vpd->volume_status;
1097 volume_flags = get_unaligned_be32(&vpd->flags);
1098 volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;
1099
1100out:
1101 kfree(vpd);
1102no_buffer:
1103 device->volume_status = volume_status;
1104 device->volume_offline = volume_offline;
1105}
1106
1107static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
1108 struct pqi_scsi_dev *device)
1109{
1110 int rc;
1111 u8 *buffer;
1112
1113 buffer = kmalloc(64, GFP_KERNEL);
1114 if (!buffer)
1115 return -ENOMEM;
1116
1117 /* Send an inquiry to the device to see what it is. */
1118 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
1119 if (rc)
1120 goto out;
1121
1122 scsi_sanitize_inquiry_string(&buffer[8], 8);
1123 scsi_sanitize_inquiry_string(&buffer[16], 16);
1124
1125 device->devtype = buffer[0] & 0x1f;
1126 memcpy(device->vendor, &buffer[8],
1127 sizeof(device->vendor));
1128 memcpy(device->model, &buffer[16],
1129 sizeof(device->model));
1130
1131 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
1132 pqi_get_raid_level(ctrl_info, device);
1133 pqi_get_offload_status(ctrl_info, device);
1134 pqi_get_volume_status(ctrl_info, device);
1135 }
1136
1137out:
1138 kfree(buffer);
1139
1140 return rc;
1141}
1142
1143static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
1144 struct pqi_scsi_dev *device,
1145 struct bmic_identify_physical_device *id_phys)
1146{
1147 int rc;
1148
1149 memset(id_phys, 0, sizeof(*id_phys));
1150
1151 rc = pqi_identify_physical_device(ctrl_info, device,
1152 id_phys, sizeof(*id_phys));
1153 if (rc) {
1154 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1155 return;
1156 }
1157
1158 device->queue_depth =
1159 get_unaligned_le16(&id_phys->current_queue_depth_limit);
1160 device->device_type = id_phys->device_type;
1161 device->active_path_index = id_phys->active_path_number;
1162 device->path_map = id_phys->redundant_path_present_map;
1163 memcpy(&device->box,
1164 &id_phys->alternate_paths_phys_box_on_port,
1165 sizeof(device->box));
1166 memcpy(&device->phys_connector,
1167 &id_phys->alternate_paths_phys_connector,
1168 sizeof(device->phys_connector));
1169 device->bay = id_phys->phys_bay_in_box;
1170}
1171
1172static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
1173 struct pqi_scsi_dev *device)
1174{
1175 char *status;
1176 static const char unknown_state_str[] =
1177 "Volume is in an unknown state (%u)";
1178 char unknown_state_buffer[sizeof(unknown_state_str) + 10];
1179
1180 switch (device->volume_status) {
1181 case CISS_LV_OK:
1182 status = "Volume online";
1183 break;
1184 case CISS_LV_FAILED:
1185 status = "Volume failed";
1186 break;
1187 case CISS_LV_NOT_CONFIGURED:
1188 status = "Volume not configured";
1189 break;
1190 case CISS_LV_DEGRADED:
1191 status = "Volume degraded";
1192 break;
1193 case CISS_LV_READY_FOR_RECOVERY:
1194 status = "Volume ready for recovery operation";
1195 break;
1196 case CISS_LV_UNDERGOING_RECOVERY:
1197 status = "Volume undergoing recovery";
1198 break;
1199 case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
1200 status = "Wrong physical drive was replaced";
1201 break;
1202 case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
1203 status = "A physical drive not properly connected";
1204 break;
1205 case CISS_LV_HARDWARE_OVERHEATING:
1206 status = "Hardware is overheating";
1207 break;
1208 case CISS_LV_HARDWARE_HAS_OVERHEATED:
1209 status = "Hardware has overheated";
1210 break;
1211 case CISS_LV_UNDERGOING_EXPANSION:
1212 status = "Volume undergoing expansion";
1213 break;
1214 case CISS_LV_NOT_AVAILABLE:
1215 status = "Volume waiting for transforming volume";
1216 break;
1217 case CISS_LV_QUEUED_FOR_EXPANSION:
1218 status = "Volume queued for expansion";
1219 break;
1220 case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
1221 status = "Volume disabled due to SCSI ID conflict";
1222 break;
1223 case CISS_LV_EJECTED:
1224 status = "Volume has been ejected";
1225 break;
1226 case CISS_LV_UNDERGOING_ERASE:
1227 status = "Volume undergoing background erase";
1228 break;
1229 case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
1230 status = "Volume ready for predictive spare rebuild";
1231 break;
1232 case CISS_LV_UNDERGOING_RPI:
1233 status = "Volume undergoing rapid parity initialization";
1234 break;
1235 case CISS_LV_PENDING_RPI:
1236 status = "Volume queued for rapid parity initialization";
1237 break;
1238 case CISS_LV_ENCRYPTED_NO_KEY:
1239 status = "Encrypted volume inaccessible - key not present";
1240 break;
1241 case CISS_LV_UNDERGOING_ENCRYPTION:
1242 status = "Volume undergoing encryption process";
1243 break;
1244 case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
1245 status = "Volume undergoing encryption re-keying process";
1246 break;
1247 case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1248 status =
1249 "Encrypted volume inaccessible - disabled on ctrl";
1250 break;
1251 case CISS_LV_PENDING_ENCRYPTION:
1252 status = "Volume pending migration to encrypted state";
1253 break;
1254 case CISS_LV_PENDING_ENCRYPTION_REKEYING:
1255 status = "Volume pending encryption rekeying";
1256 break;
1257 case CISS_LV_NOT_SUPPORTED:
1258 status = "Volume not supported on this controller";
1259 break;
1260 case CISS_LV_STATUS_UNAVAILABLE:
1261 status = "Volume status not available";
1262 break;
1263 default:
1264 snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
1265 unknown_state_str, device->volume_status);
1266 status = unknown_state_buffer;
1267 break;
1268 }
1269
1270 dev_info(&ctrl_info->pci_dev->dev,
1271 "scsi %d:%d:%d:%d %s\n",
1272 ctrl_info->scsi_host->host_no,
1273 device->bus, device->target, device->lun, status);
1274}
1275
1276static struct pqi_scsi_dev *pqi_find_disk_by_aio_handle(
1277 struct pqi_ctrl_info *ctrl_info, u32 aio_handle)
1278{
1279 struct pqi_scsi_dev *device;
1280
1281 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1282 scsi_device_list_entry) {
1283 if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
1284 continue;
1285 if (pqi_is_logical_device(device))
1286 continue;
1287 if (device->aio_handle == aio_handle)
1288 return device;
1289 }
1290
1291 return NULL;
1292}
1293
1294static void pqi_update_logical_drive_queue_depth(
1295 struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *logical_drive)
1296{
1297 unsigned int i;
1298 struct raid_map *raid_map;
1299 struct raid_map_disk_data *disk_data;
1300 struct pqi_scsi_dev *phys_disk;
1301 unsigned int num_phys_disks;
1302 unsigned int num_raid_map_entries;
1303 unsigned int queue_depth;
1304
1305 logical_drive->queue_depth = PQI_LOGICAL_DRIVE_DEFAULT_MAX_QUEUE_DEPTH;
1306
1307 raid_map = logical_drive->raid_map;
1308 if (!raid_map)
1309 return;
1310
1311 disk_data = raid_map->disk_data;
1312 num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
1313 (get_unaligned_le16(&raid_map->data_disks_per_row) +
1314 get_unaligned_le16(&raid_map->metadata_disks_per_row));
1315 num_raid_map_entries = num_phys_disks *
1316 get_unaligned_le16(&raid_map->row_cnt);
1317
1318 queue_depth = 0;
1319 for (i = 0; i < num_raid_map_entries; i++) {
1320 phys_disk = pqi_find_disk_by_aio_handle(ctrl_info,
1321 disk_data[i].aio_handle);
1322
1323 if (!phys_disk) {
1324 dev_warn(&ctrl_info->pci_dev->dev,
1325 "failed to find physical disk for logical drive %016llx\n",
1326 get_unaligned_be64(logical_drive->scsi3addr));
1327 logical_drive->offload_enabled = false;
1328 logical_drive->offload_enabled_pending = false;
1329 kfree(raid_map);
1330 logical_drive->raid_map = NULL;
1331 return;
1332 }
1333
1334 queue_depth += phys_disk->queue_depth;
1335 }
1336
1337 logical_drive->queue_depth = queue_depth;
1338}
1339
1340static void pqi_update_all_logical_drive_queue_depths(
1341 struct pqi_ctrl_info *ctrl_info)
1342{
1343 struct pqi_scsi_dev *device;
1344
1345 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1346 scsi_device_list_entry) {
1347 if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
1348 continue;
1349 if (!pqi_is_logical_device(device))
1350 continue;
1351 pqi_update_logical_drive_queue_depth(ctrl_info, device);
1352 }
1353}
1354
1355static void pqi_rescan_worker(struct work_struct *work)
1356{
1357 struct pqi_ctrl_info *ctrl_info;
1358
1359 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1360 rescan_work);
1361
1362 pqi_scan_scsi_devices(ctrl_info);
1363}
1364
1365static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
1366 struct pqi_scsi_dev *device)
1367{
1368 int rc;
1369
1370 if (pqi_is_logical_device(device))
1371 rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
1372 device->target, device->lun);
1373 else
1374 rc = pqi_add_sas_device(ctrl_info->sas_host, device);
1375
1376 return rc;
1377}
1378
1379static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
1380 struct pqi_scsi_dev *device)
1381{
1382 if (pqi_is_logical_device(device))
1383 scsi_remove_device(device->sdev);
1384 else
1385 pqi_remove_sas_device(device);
1386}
1387
1388/* Assumes the SCSI device list lock is held. */
1389
1390static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
1391 int bus, int target, int lun)
1392{
1393 struct pqi_scsi_dev *device;
1394
1395 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1396 scsi_device_list_entry)
1397 if (device->bus == bus && device->target == target &&
1398 device->lun == lun)
1399 return device;
1400
1401 return NULL;
1402}
1403
1404static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
1405 struct pqi_scsi_dev *dev2)
1406{
1407 if (dev1->is_physical_device != dev2->is_physical_device)
1408 return false;
1409
1410 if (dev1->is_physical_device)
1411 return dev1->wwid == dev2->wwid;
1412
1413 return memcmp(dev1->volume_id, dev2->volume_id,
1414 sizeof(dev1->volume_id)) == 0;
1415}
1416
1417enum pqi_find_result {
1418 DEVICE_NOT_FOUND,
1419 DEVICE_CHANGED,
1420 DEVICE_SAME,
1421};
1422
1423static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
1424 struct pqi_scsi_dev *device_to_find,
1425 struct pqi_scsi_dev **matching_device)
1426{
1427 struct pqi_scsi_dev *device;
1428
1429 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1430 scsi_device_list_entry) {
1431 if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
1432 device->scsi3addr)) {
1433 *matching_device = device;
1434 if (pqi_device_equal(device_to_find, device)) {
1435 if (device_to_find->volume_offline)
1436 return DEVICE_CHANGED;
1437 return DEVICE_SAME;
1438 }
1439 return DEVICE_CHANGED;
1440 }
1441 }
1442
1443 return DEVICE_NOT_FOUND;
1444}
1445
1446static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
1447 char *action, struct pqi_scsi_dev *device)
1448{
1449 dev_info(&ctrl_info->pci_dev->dev,
1450 "%s scsi %d:%d:%d:%d: %s %.8s %.16s %-12s SSDSmartPathCap%c En%c Exp%c qd=%d\n",
1451 action,
1452 ctrl_info->scsi_host->host_no,
1453 device->bus,
1454 device->target,
1455 device->lun,
1456 scsi_device_type(device->devtype),
1457 device->vendor,
1458 device->model,
1459 pqi_raid_level_to_string(device->raid_level),
1460 device->offload_configured ? '+' : '-',
1461 device->offload_enabled_pending ? '+' : '-',
1462 device->expose_device ? '+' : '-',
1463 device->queue_depth);
1464}
1465
1466/* Assumes the SCSI device list lock is held. */
1467
1468static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
1469 struct pqi_scsi_dev *new_device)
1470{
1471 existing_device->devtype = new_device->devtype;
1472 existing_device->device_type = new_device->device_type;
1473 existing_device->bus = new_device->bus;
1474 if (new_device->target_lun_valid) {
1475 existing_device->target = new_device->target;
1476 existing_device->lun = new_device->lun;
1477 existing_device->target_lun_valid = true;
1478 }
1479
1480 /* By definition, the scsi3addr and wwid fields are already the same. */
1481
1482 existing_device->is_physical_device = new_device->is_physical_device;
1483 existing_device->expose_device = new_device->expose_device;
1484 existing_device->no_uld_attach = new_device->no_uld_attach;
1485 existing_device->aio_enabled = new_device->aio_enabled;
1486 memcpy(existing_device->vendor, new_device->vendor,
1487 sizeof(existing_device->vendor));
1488 memcpy(existing_device->model, new_device->model,
1489 sizeof(existing_device->model));
1490 existing_device->sas_address = new_device->sas_address;
1491 existing_device->raid_level = new_device->raid_level;
1492 existing_device->queue_depth = new_device->queue_depth;
1493 existing_device->aio_handle = new_device->aio_handle;
1494 existing_device->volume_status = new_device->volume_status;
1495 existing_device->active_path_index = new_device->active_path_index;
1496 existing_device->path_map = new_device->path_map;
1497 existing_device->bay = new_device->bay;
1498 memcpy(existing_device->box, new_device->box,
1499 sizeof(existing_device->box));
1500 memcpy(existing_device->phys_connector, new_device->phys_connector,
1501 sizeof(existing_device->phys_connector));
1502 existing_device->offload_configured = new_device->offload_configured;
1503 existing_device->offload_enabled = false;
1504 existing_device->offload_enabled_pending =
1505 new_device->offload_enabled_pending;
1506 existing_device->offload_to_mirror = 0;
1507 kfree(existing_device->raid_map);
1508 existing_device->raid_map = new_device->raid_map;
1509
1510 /* To prevent this from being freed later. */
1511 new_device->raid_map = NULL;
1512}
1513
1514static inline void pqi_free_device(struct pqi_scsi_dev *device)
1515{
1516 if (device) {
1517 kfree(device->raid_map);
1518 kfree(device);
1519 }
1520}
1521
1522/*
1523 * Called when exposing a new device to the OS fails in order to re-adjust
1524 * our internal SCSI device list to match the SCSI ML's view.
1525 */
1526
1527static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
1528 struct pqi_scsi_dev *device)
1529{
1530 unsigned long flags;
1531
1532 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1533 list_del(&device->scsi_device_list_entry);
1534 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1535
1536 /* Allow the device structure to be freed later. */
1537 device->keep_device = false;
1538}
1539
1540static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
1541 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
1542{
1543 int rc;
1544 unsigned int i;
1545 unsigned long flags;
1546 enum pqi_find_result find_result;
1547 struct pqi_scsi_dev *device;
1548 struct pqi_scsi_dev *next;
1549 struct pqi_scsi_dev *matching_device;
1550 struct list_head add_list;
1551 struct list_head delete_list;
1552
1553 INIT_LIST_HEAD(&add_list);
1554 INIT_LIST_HEAD(&delete_list);
1555
1556 /*
1557 * The idea here is to do as little work as possible while holding the
1558 * spinlock. That's why we go to great pains to defer anything other
1559 * than updating the internal device list until after we release the
1560 * spinlock.
1561 */
1562
1563 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1564
1565 /* Assume that all devices in the existing list have gone away. */
1566 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1567 scsi_device_list_entry)
1568 device->device_gone = true;
1569
1570 for (i = 0; i < num_new_devices; i++) {
1571 device = new_device_list[i];
1572
1573 find_result = pqi_scsi_find_entry(ctrl_info, device,
1574 &matching_device);
1575
1576 switch (find_result) {
1577 case DEVICE_SAME:
1578 /*
1579 * The newly found device is already in the existing
1580 * device list.
1581 */
1582 device->new_device = false;
1583 matching_device->device_gone = false;
1584 pqi_scsi_update_device(matching_device, device);
1585 break;
1586 case DEVICE_NOT_FOUND:
1587 /*
1588 * The newly found device is NOT in the existing device
1589 * list.
1590 */
1591 device->new_device = true;
1592 break;
1593 case DEVICE_CHANGED:
1594 /*
1595 * The original device has gone away and we need to add
1596 * the new device.
1597 */
1598 device->new_device = true;
1599 break;
1600 default:
1601 WARN_ON(find_result);
1602 break;
1603 }
1604 }
1605
1606 /* Process all devices that have gone away. */
1607 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
1608 scsi_device_list_entry) {
1609 if (device->device_gone) {
1610 list_del(&device->scsi_device_list_entry);
1611 list_add_tail(&device->delete_list_entry, &delete_list);
1612 }
1613 }
1614
1615 /* Process all new devices. */
1616 for (i = 0; i < num_new_devices; i++) {
1617 device = new_device_list[i];
1618 if (!device->new_device)
1619 continue;
1620 if (device->volume_offline)
1621 continue;
1622 list_add_tail(&device->scsi_device_list_entry,
1623 &ctrl_info->scsi_device_list);
1624 list_add_tail(&device->add_list_entry, &add_list);
1625 /* To prevent this device structure from being freed later. */
1626 device->keep_device = true;
1627 }
1628
1629 pqi_update_all_logical_drive_queue_depths(ctrl_info);
1630
1631 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1632 scsi_device_list_entry)
1633 device->offload_enabled =
1634 device->offload_enabled_pending;
1635
1636 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1637
1638 /* Remove all devices that have gone away. */
1639 list_for_each_entry_safe(device, next, &delete_list,
1640 delete_list_entry) {
1641 if (device->sdev)
1642 pqi_remove_device(ctrl_info, device);
1643 if (device->volume_offline) {
1644 pqi_dev_info(ctrl_info, "offline", device);
1645 pqi_show_volume_status(ctrl_info, device);
1646 } else {
1647 pqi_dev_info(ctrl_info, "removed", device);
1648 }
1649 list_del(&device->delete_list_entry);
1650 pqi_free_device(device);
1651 }
1652
1653 /*
1654 * Notify the SCSI ML if the queue depth of any existing device has
1655 * changed.
1656 */
1657 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1658 scsi_device_list_entry) {
1659 if (device->sdev && device->queue_depth !=
1660 device->advertised_queue_depth) {
1661 device->advertised_queue_depth = device->queue_depth;
1662 scsi_change_queue_depth(device->sdev,
1663 device->advertised_queue_depth);
1664 }
1665 }
1666
1667 /* Expose any new devices. */
1668 list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
1669 if (device->expose_device && !device->sdev) {
1670 rc = pqi_add_device(ctrl_info, device);
1671 if (rc) {
1672 dev_warn(&ctrl_info->pci_dev->dev,
1673 "scsi %d:%d:%d:%d addition failed, device not added\n",
1674 ctrl_info->scsi_host->host_no,
1675 device->bus, device->target,
1676 device->lun);
1677 pqi_fixup_botched_add(ctrl_info, device);
1678 continue;
1679 }
1680 }
1681 pqi_dev_info(ctrl_info, "added", device);
1682 }
1683}
1684
1685static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
1686{
1687 bool is_supported = false;
1688
1689 switch (device->devtype) {
1690 case TYPE_DISK:
1691 case TYPE_ZBC:
1692 case TYPE_TAPE:
1693 case TYPE_MEDIUM_CHANGER:
1694 case TYPE_ENCLOSURE:
1695 is_supported = true;
1696 break;
1697 case TYPE_RAID:
1698 /*
1699 * Only support the HBA controller itself as a RAID
1700 * controller. If it's a RAID controller other than
1701 * the HBA itself (an external RAID controller, MSA500
1702 * or similar), we don't support it.
1703 */
1704 if (pqi_is_hba_lunid(device->scsi3addr))
1705 is_supported = true;
1706 break;
1707 }
1708
1709 return is_supported;
1710}
1711
1712static inline bool pqi_skip_device(u8 *scsi3addr,
1713 struct report_phys_lun_extended_entry *phys_lun_ext_entry)
1714{
1715 u8 device_flags;
1716
1717 if (!MASKED_DEVICE(scsi3addr))
1718 return false;
1719
1720 /* The device is masked. */
1721
1722 device_flags = phys_lun_ext_entry->device_flags;
1723
1724 if (device_flags & REPORT_PHYS_LUN_DEV_FLAG_NON_DISK) {
1725 /*
1726 * It's a non-disk device. We ignore all devices of this type
1727 * when they're masked.
1728 */
1729 return true;
1730 }
1731
1732 return false;
1733}
1734
1735static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
1736{
1737 /* Expose all devices except for physical devices that are masked. */
1738 if (device->is_physical_device && MASKED_DEVICE(device->scsi3addr))
1739 return false;
1740
1741 return true;
1742}
1743
1744static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1745{
1746 int i;
1747 int rc;
1748 struct list_head new_device_list_head;
1749 struct report_phys_lun_extended *physdev_list = NULL;
1750 struct report_log_lun_extended *logdev_list = NULL;
1751 struct report_phys_lun_extended_entry *phys_lun_ext_entry;
1752 struct report_log_lun_extended_entry *log_lun_ext_entry;
1753 struct bmic_identify_physical_device *id_phys = NULL;
1754 u32 num_physicals;
1755 u32 num_logicals;
1756 struct pqi_scsi_dev **new_device_list = NULL;
1757 struct pqi_scsi_dev *device;
1758 struct pqi_scsi_dev *next;
1759 unsigned int num_new_devices;
1760 unsigned int num_valid_devices;
1761 bool is_physical_device;
1762 u8 *scsi3addr;
1763 static char *out_of_memory_msg =
1764 "out of memory, device discovery stopped";
1765
1766 INIT_LIST_HEAD(&new_device_list_head);
1767
1768 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
1769 if (rc)
1770 goto out;
1771
1772 if (physdev_list)
1773 num_physicals =
1774 get_unaligned_be32(&physdev_list->header.list_length)
1775 / sizeof(physdev_list->lun_entries[0]);
1776 else
1777 num_physicals = 0;
1778
1779 if (logdev_list)
1780 num_logicals =
1781 get_unaligned_be32(&logdev_list->header.list_length)
1782 / sizeof(logdev_list->lun_entries[0]);
1783 else
1784 num_logicals = 0;
1785
1786 if (num_physicals) {
1787 /*
1788 * We need this buffer for calls to pqi_get_physical_disk_info()
1789 * below. We allocate it here instead of inside
1790 * pqi_get_physical_disk_info() because it's a fairly large
1791 * buffer.
1792 */
1793 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
1794 if (!id_phys) {
1795 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1796 out_of_memory_msg);
1797 rc = -ENOMEM;
1798 goto out;
1799 }
1800 }
1801
1802 num_new_devices = num_physicals + num_logicals;
1803
1804 new_device_list = kmalloc(sizeof(*new_device_list) *
1805 num_new_devices, GFP_KERNEL);
1806 if (!new_device_list) {
1807 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
1808 rc = -ENOMEM;
1809 goto out;
1810 }
1811
1812 for (i = 0; i < num_new_devices; i++) {
1813 device = kzalloc(sizeof(*device), GFP_KERNEL);
1814 if (!device) {
1815 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1816 out_of_memory_msg);
1817 rc = -ENOMEM;
1818 goto out;
1819 }
1820 list_add_tail(&device->new_device_list_entry,
1821 &new_device_list_head);
1822 }
1823
1824 device = NULL;
1825 num_valid_devices = 0;
1826
1827 for (i = 0; i < num_new_devices; i++) {
1828
1829 if (i < num_physicals) {
1830 is_physical_device = true;
1831 phys_lun_ext_entry = &physdev_list->lun_entries[i];
1832 log_lun_ext_entry = NULL;
1833 scsi3addr = phys_lun_ext_entry->lunid;
1834 } else {
1835 is_physical_device = false;
1836 phys_lun_ext_entry = NULL;
1837 log_lun_ext_entry =
1838 &logdev_list->lun_entries[i - num_physicals];
1839 scsi3addr = log_lun_ext_entry->lunid;
1840 }
1841
1842 if (is_physical_device &&
1843 pqi_skip_device(scsi3addr, phys_lun_ext_entry))
1844 continue;
1845
1846 if (device)
1847 device = list_next_entry(device, new_device_list_entry);
1848 else
1849 device = list_first_entry(&new_device_list_head,
1850 struct pqi_scsi_dev, new_device_list_entry);
1851
1852 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1853 device->is_physical_device = is_physical_device;
1854 device->raid_level = SA_RAID_UNKNOWN;
1855
1856 /* Gather information about the device. */
1857 rc = pqi_get_device_info(ctrl_info, device);
1858 if (rc == -ENOMEM) {
1859 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1860 out_of_memory_msg);
1861 goto out;
1862 }
1863 if (rc) {
1864 dev_warn(&ctrl_info->pci_dev->dev,
1865 "obtaining device info failed, skipping device %016llx\n",
1866 get_unaligned_be64(device->scsi3addr));
1867 rc = 0;
1868 continue;
1869 }
1870
1871 if (!pqi_is_supported_device(device))
1872 continue;
1873
1874 pqi_assign_bus_target_lun(device);
1875
1876 device->expose_device = pqi_expose_device(device);
1877
1878 if (device->is_physical_device) {
1879 device->wwid = phys_lun_ext_entry->wwid;
1880 if ((phys_lun_ext_entry->device_flags &
1881 REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
1882 phys_lun_ext_entry->aio_handle)
1883 device->aio_enabled = true;
1884 } else {
1885 memcpy(device->volume_id, log_lun_ext_entry->volume_id,
1886 sizeof(device->volume_id));
1887 }
1888
1889 switch (device->devtype) {
1890 case TYPE_DISK:
1891 case TYPE_ZBC:
1892 case TYPE_ENCLOSURE:
1893 if (device->is_physical_device) {
1894 device->sas_address =
1895 get_unaligned_be64(&device->wwid);
1896 if (device->devtype == TYPE_DISK ||
1897 device->devtype == TYPE_ZBC) {
1898 device->aio_handle =
1899 phys_lun_ext_entry->aio_handle;
1900 pqi_get_physical_disk_info(ctrl_info,
1901 device, id_phys);
1902 }
1903 }
1904 break;
1905 }
1906
1907 new_device_list[num_valid_devices++] = device;
1908 }
1909
1910 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
1911
1912out:
1913 list_for_each_entry_safe(device, next, &new_device_list_head,
1914 new_device_list_entry) {
1915 if (device->keep_device)
1916 continue;
1917 list_del(&device->new_device_list_entry);
1918 pqi_free_device(device);
1919 }
1920
1921 kfree(new_device_list);
1922 kfree(physdev_list);
1923 kfree(logdev_list);
1924 kfree(id_phys);
1925
1926 return rc;
1927}
1928
1929static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1930{
1931 unsigned long flags;
1932 struct pqi_scsi_dev *device;
Kevin Barnett6c223762016-06-27 16:41:00 -05001933
Kevin Barnetta37ef742017-05-03 18:52:22 -05001934 while (1) {
1935 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
Kevin Barnett6c223762016-06-27 16:41:00 -05001936
Kevin Barnetta37ef742017-05-03 18:52:22 -05001937 device = list_first_entry_or_null(&ctrl_info->scsi_device_list,
1938 struct pqi_scsi_dev, scsi_device_list_entry);
1939 if (device)
1940 list_del(&device->scsi_device_list_entry);
1941
1942 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
1943 flags);
1944
1945 if (!device)
1946 break;
1947
Kevin Barnett6c223762016-06-27 16:41:00 -05001948 if (device->sdev)
1949 pqi_remove_device(ctrl_info, device);
Kevin Barnett6c223762016-06-27 16:41:00 -05001950 pqi_free_device(device);
1951 }
Kevin Barnett6c223762016-06-27 16:41:00 -05001952}
1953
1954static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1955{
1956 int rc;
1957
1958 if (pqi_ctrl_offline(ctrl_info))
1959 return -ENXIO;
1960
1961 mutex_lock(&ctrl_info->scan_mutex);
1962
1963 rc = pqi_update_scsi_devices(ctrl_info);
1964 if (rc)
1965 pqi_schedule_rescan_worker(ctrl_info);
1966
1967 mutex_unlock(&ctrl_info->scan_mutex);
1968
1969 return rc;
1970}
1971
1972static void pqi_scan_start(struct Scsi_Host *shost)
1973{
1974 pqi_scan_scsi_devices(shost_to_hba(shost));
1975}
1976
1977/* Returns TRUE if scan is finished. */
1978
1979static int pqi_scan_finished(struct Scsi_Host *shost,
1980 unsigned long elapsed_time)
1981{
1982 struct pqi_ctrl_info *ctrl_info;
1983
1984 ctrl_info = shost_priv(shost);
1985
1986 return !mutex_is_locked(&ctrl_info->scan_mutex);
1987}
1988
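/*
 * Descriptive note on the two helpers below: they wait for an in-progress
 * scan or LUN reset to finish by briefly acquiring and releasing the
 * corresponding mutex.  pqi_scan_scsi_devices() above holds scan_mutex for
 * the entire device update, so lock-then-unlock acts as a "wait until idle"
 * barrier; the LUN reset path is assumed to use lun_reset_mutex the same way.
 */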
Kevin Barnett061ef062017-05-03 18:53:05 -05001989static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info)
1990{
1991 mutex_lock(&ctrl_info->scan_mutex);
1992 mutex_unlock(&ctrl_info->scan_mutex);
1993}
1994
1995static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info)
1996{
1997 mutex_lock(&ctrl_info->lun_reset_mutex);
1998 mutex_unlock(&ctrl_info->lun_reset_mutex);
1999}
2000
Kevin Barnett6c223762016-06-27 16:41:00 -05002001static inline void pqi_set_encryption_info(
2002 struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
2003 u64 first_block)
2004{
2005 u32 volume_blk_size;
2006
2007 /*
2008 * Set the encryption tweak values based on logical block address.
2009 * If the block size is 512, the tweak value is equal to the LBA.
2010	 * For other block sizes, the tweak value is (LBA * block size) / 512.
2011 */
2012 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
2013 if (volume_blk_size != 512)
2014 first_block = (first_block * volume_blk_size) / 512;
2015
2016 encryption_info->data_encryption_key_index =
2017 get_unaligned_le16(&raid_map->data_encryption_key_index);
2018 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
2019 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
2020}
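/*
 * Worked example for the tweak calculation above (hypothetical values):
 * with a 4096-byte volume block size and first_block = 100, the tweak
 * becomes (100 * 4096) / 512 = 800, i.e. the LBA expressed in 512-byte
 * units; encrypt_tweak_lower/upper then carry the low and high 32 bits
 * of that value.
 */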
2021
2022/*
2023 * Attempt to perform offload RAID mapping for a logical volume I/O.
2024 */
2025
2026#define PQI_RAID_BYPASS_INELIGIBLE 1
2027
2028static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
2029 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
2030 struct pqi_queue_group *queue_group)
2031{
2032 struct raid_map *raid_map;
2033 bool is_write = false;
2034 u32 map_index;
2035 u64 first_block;
2036 u64 last_block;
2037 u32 block_cnt;
2038 u32 blocks_per_row;
2039 u64 first_row;
2040 u64 last_row;
2041 u32 first_row_offset;
2042 u32 last_row_offset;
2043 u32 first_column;
2044 u32 last_column;
2045 u64 r0_first_row;
2046 u64 r0_last_row;
2047 u32 r5or6_blocks_per_row;
2048 u64 r5or6_first_row;
2049 u64 r5or6_last_row;
2050 u32 r5or6_first_row_offset;
2051 u32 r5or6_last_row_offset;
2052 u32 r5or6_first_column;
2053 u32 r5or6_last_column;
2054 u16 data_disks_per_row;
2055 u32 total_disks_per_row;
2056 u16 layout_map_count;
2057 u32 stripesize;
2058 u16 strip_size;
2059 u32 first_group;
2060 u32 last_group;
2061 u32 current_group;
2062 u32 map_row;
2063 u32 aio_handle;
2064 u64 disk_block;
2065 u32 disk_block_cnt;
2066 u8 cdb[16];
2067 u8 cdb_length;
2068 int offload_to_mirror;
2069 struct pqi_encryption_info *encryption_info_ptr;
2070 struct pqi_encryption_info encryption_info;
2071#if BITS_PER_LONG == 32
2072 u64 tmpdiv;
2073#endif
2074
2075 /* Check for valid opcode, get LBA and block count. */
2076 switch (scmd->cmnd[0]) {
2077 case WRITE_6:
2078 is_write = true;
2079 /* fall through */
2080 case READ_6:
kevin Barnette018ef52016-09-16 15:01:51 -05002081 first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
2082 (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
Kevin Barnett6c223762016-06-27 16:41:00 -05002083 block_cnt = (u32)scmd->cmnd[4];
2084 if (block_cnt == 0)
2085 block_cnt = 256;
2086 break;
2087 case WRITE_10:
2088 is_write = true;
2089 /* fall through */
2090 case READ_10:
2091 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2092 block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
2093 break;
2094 case WRITE_12:
2095 is_write = true;
2096 /* fall through */
2097 case READ_12:
2098 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2099 block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
2100 break;
2101 case WRITE_16:
2102 is_write = true;
2103 /* fall through */
2104 case READ_16:
2105 first_block = get_unaligned_be64(&scmd->cmnd[2]);
2106 block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
2107 break;
2108 default:
2109 /* Process via normal I/O path. */
2110 return PQI_RAID_BYPASS_INELIGIBLE;
2111 }
2112
2113 /* Check for write to non-RAID-0. */
2114 if (is_write && device->raid_level != SA_RAID_0)
2115 return PQI_RAID_BYPASS_INELIGIBLE;
2116
2117 if (unlikely(block_cnt == 0))
2118 return PQI_RAID_BYPASS_INELIGIBLE;
2119
2120 last_block = first_block + block_cnt - 1;
2121 raid_map = device->raid_map;
2122
2123 /* Check for invalid block or wraparound. */
2124 if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2125 last_block < first_block)
2126 return PQI_RAID_BYPASS_INELIGIBLE;
2127
2128 data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
2129 strip_size = get_unaligned_le16(&raid_map->strip_size);
2130 layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2131
2132 /* Calculate stripe information for the request. */
2133 blocks_per_row = data_disks_per_row * strip_size;
2134#if BITS_PER_LONG == 32
2135 tmpdiv = first_block;
2136 do_div(tmpdiv, blocks_per_row);
2137 first_row = tmpdiv;
2138 tmpdiv = last_block;
2139 do_div(tmpdiv, blocks_per_row);
2140 last_row = tmpdiv;
2141 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2142 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2143 tmpdiv = first_row_offset;
2144 do_div(tmpdiv, strip_size);
2145 first_column = tmpdiv;
2146 tmpdiv = last_row_offset;
2147 do_div(tmpdiv, strip_size);
2148 last_column = tmpdiv;
2149#else
2150 first_row = first_block / blocks_per_row;
2151 last_row = last_block / blocks_per_row;
2152 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2153 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2154 first_column = first_row_offset / strip_size;
2155 last_column = last_row_offset / strip_size;
2156#endif
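	/*
	 * Example of the geometry math above (hypothetical values): with
	 * strip_size = 128 and data_disks_per_row = 3, blocks_per_row = 384.
	 * For first_block = 1000 this yields first_row = 1000 / 384 = 2,
	 * first_row_offset = 1000 - (2 * 384) = 232 and
	 * first_column = 232 / 128 = 1.
	 */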
2157
2158 /* If this isn't a single row/column then give to the controller. */
2159 if (first_row != last_row || first_column != last_column)
2160 return PQI_RAID_BYPASS_INELIGIBLE;
2161
2162 /* Proceeding with driver mapping. */
2163 total_disks_per_row = data_disks_per_row +
2164 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2165 map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2166 get_unaligned_le16(&raid_map->row_cnt);
2167 map_index = (map_row * total_disks_per_row) + first_column;
2168
2169 /* RAID 1 */
2170 if (device->raid_level == SA_RAID_1) {
2171 if (device->offload_to_mirror)
2172 map_index += data_disks_per_row;
2173 device->offload_to_mirror = !device->offload_to_mirror;
2174 } else if (device->raid_level == SA_RAID_ADM) {
2175 /* RAID ADM */
2176 /*
2177 * Handles N-way mirrors (R1-ADM) and R10 with # of drives
2178 * divisible by 3.
2179 */
2180 offload_to_mirror = device->offload_to_mirror;
2181 if (offload_to_mirror == 0) {
2182 /* use physical disk in the first mirrored group. */
2183 map_index %= data_disks_per_row;
2184 } else {
2185 do {
2186 /*
2187 * Determine mirror group that map_index
2188 * indicates.
2189 */
2190 current_group = map_index / data_disks_per_row;
2191
2192 if (offload_to_mirror != current_group) {
2193 if (current_group <
2194 layout_map_count - 1) {
2195 /*
2196 * Select raid index from
2197 * next group.
2198 */
2199 map_index += data_disks_per_row;
2200 current_group++;
2201 } else {
2202 /*
2203 * Select raid index from first
2204 * group.
2205 */
2206 map_index %= data_disks_per_row;
2207 current_group = 0;
2208 }
2209 }
2210 } while (offload_to_mirror != current_group);
2211 }
2212
2213 /* Set mirror group to use next time. */
2214 offload_to_mirror =
2215 (offload_to_mirror >= layout_map_count - 1) ?
2216 0 : offload_to_mirror + 1;
2217 WARN_ON(offload_to_mirror >= layout_map_count);
2218 device->offload_to_mirror = offload_to_mirror;
2219 /*
2220		 * Avoid using device->offload_to_mirror directly in the calculation
2221		 * above, since multiple threads might increment it concurrently and
2222		 * push it past the valid range of 0 to layout_map_count - 1.
2223 */
2224 } else if ((device->raid_level == SA_RAID_5 ||
2225 device->raid_level == SA_RAID_6) && layout_map_count > 1) {
2226 /* RAID 50/60 */
2227 /* Verify first and last block are in same RAID group */
2228 r5or6_blocks_per_row = strip_size * data_disks_per_row;
2229 stripesize = r5or6_blocks_per_row * layout_map_count;
2230#if BITS_PER_LONG == 32
2231 tmpdiv = first_block;
2232 first_group = do_div(tmpdiv, stripesize);
2233 tmpdiv = first_group;
2234 do_div(tmpdiv, r5or6_blocks_per_row);
2235 first_group = tmpdiv;
2236 tmpdiv = last_block;
2237 last_group = do_div(tmpdiv, stripesize);
2238 tmpdiv = last_group;
2239 do_div(tmpdiv, r5or6_blocks_per_row);
2240 last_group = tmpdiv;
2241#else
2242 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
2243 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
2244#endif
2245 if (first_group != last_group)
2246 return PQI_RAID_BYPASS_INELIGIBLE;
2247
2248 /* Verify request is in a single row of RAID 5/6 */
2249#if BITS_PER_LONG == 32
2250 tmpdiv = first_block;
2251 do_div(tmpdiv, stripesize);
2252 first_row = r5or6_first_row = r0_first_row = tmpdiv;
2253 tmpdiv = last_block;
2254 do_div(tmpdiv, stripesize);
2255 r5or6_last_row = r0_last_row = tmpdiv;
2256#else
2257 first_row = r5or6_first_row = r0_first_row =
2258 first_block / stripesize;
2259 r5or6_last_row = r0_last_row = last_block / stripesize;
2260#endif
2261 if (r5or6_first_row != r5or6_last_row)
2262 return PQI_RAID_BYPASS_INELIGIBLE;
2263
2264 /* Verify request is in a single column */
2265#if BITS_PER_LONG == 32
2266 tmpdiv = first_block;
2267 first_row_offset = do_div(tmpdiv, stripesize);
2268 tmpdiv = first_row_offset;
2269 first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
2270 r5or6_first_row_offset = first_row_offset;
2271 tmpdiv = last_block;
2272 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
2273 tmpdiv = r5or6_last_row_offset;
2274 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
2275 tmpdiv = r5or6_first_row_offset;
2276 do_div(tmpdiv, strip_size);
2277 first_column = r5or6_first_column = tmpdiv;
2278 tmpdiv = r5or6_last_row_offset;
2279 do_div(tmpdiv, strip_size);
2280 r5or6_last_column = tmpdiv;
2281#else
2282 first_row_offset = r5or6_first_row_offset =
2283 (u32)((first_block % stripesize) %
2284 r5or6_blocks_per_row);
2285
2286 r5or6_last_row_offset =
2287 (u32)((last_block % stripesize) %
2288 r5or6_blocks_per_row);
2289
2290 first_column = r5or6_first_row_offset / strip_size;
2291 r5or6_first_column = first_column;
2292 r5or6_last_column = r5or6_last_row_offset / strip_size;
2293#endif
2294 if (r5or6_first_column != r5or6_last_column)
2295 return PQI_RAID_BYPASS_INELIGIBLE;
2296
2297 /* Request is eligible */
2298 map_row =
2299 ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2300 get_unaligned_le16(&raid_map->row_cnt);
2301
2302 map_index = (first_group *
2303 (get_unaligned_le16(&raid_map->row_cnt) *
2304 total_disks_per_row)) +
2305 (map_row * total_disks_per_row) + first_column;
2306 }
2307
2308 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
2309 return PQI_RAID_BYPASS_INELIGIBLE;
2310
2311 aio_handle = raid_map->disk_data[map_index].aio_handle;
2312 disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2313 first_row * strip_size +
2314 (first_row_offset - first_column * strip_size);
2315 disk_block_cnt = block_cnt;
2316
2317 /* Handle differing logical/physical block sizes. */
2318 if (raid_map->phys_blk_shift) {
2319 disk_block <<= raid_map->phys_blk_shift;
2320 disk_block_cnt <<= raid_map->phys_blk_shift;
2321 }
2322
2323 if (unlikely(disk_block_cnt > 0xffff))
2324 return PQI_RAID_BYPASS_INELIGIBLE;
2325
2326 /* Build the new CDB for the physical disk I/O. */
2327 if (disk_block > 0xffffffff) {
2328 cdb[0] = is_write ? WRITE_16 : READ_16;
2329 cdb[1] = 0;
2330 put_unaligned_be64(disk_block, &cdb[2]);
2331 put_unaligned_be32(disk_block_cnt, &cdb[10]);
2332 cdb[14] = 0;
2333 cdb[15] = 0;
2334 cdb_length = 16;
2335 } else {
2336 cdb[0] = is_write ? WRITE_10 : READ_10;
2337 cdb[1] = 0;
2338 put_unaligned_be32((u32)disk_block, &cdb[2]);
2339 cdb[6] = 0;
2340 put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
2341 cdb[9] = 0;
2342 cdb_length = 10;
2343 }
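	/*
	 * A 10-byte CDB can only express a 32-bit LBA and a 16-bit transfer
	 * length, so READ_16/WRITE_16 is used whenever the translated
	 * disk_block no longer fits in 32 bits; requests whose translated
	 * block count exceeds 0xffff were already sent back to the normal
	 * RAID path above.
	 */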
2344
2345 if (get_unaligned_le16(&raid_map->flags) &
2346 RAID_MAP_ENCRYPTION_ENABLED) {
2347 pqi_set_encryption_info(&encryption_info, raid_map,
2348 first_block);
2349 encryption_info_ptr = &encryption_info;
2350 } else {
2351 encryption_info_ptr = NULL;
2352 }
2353
2354 return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
2355 cdb, cdb_length, queue_group, encryption_info_ptr);
2356}
2357
2358#define PQI_STATUS_IDLE 0x0
2359
2360#define PQI_CREATE_ADMIN_QUEUE_PAIR 1
2361#define PQI_DELETE_ADMIN_QUEUE_PAIR 2
2362
2363#define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
2364#define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
2365#define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
2366#define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
2367#define PQI_DEVICE_STATE_ERROR 0x4
2368
2369#define PQI_MODE_READY_TIMEOUT_SECS 30
2370#define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
2371
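/*
 * Poll the controller through the three visible stages of the transition
 * to PQI mode: the PQI signature appears, the function and status code
 * reports IDLE, and device_status finally reaches ALL_REGISTERS_READY.
 * A single 30-second deadline, computed once below, covers all three
 * stages.
 */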
2372static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
2373{
2374 struct pqi_device_registers __iomem *pqi_registers;
2375 unsigned long timeout;
2376 u64 signature;
2377 u8 status;
2378
2379 pqi_registers = ctrl_info->pqi_registers;
2380 timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
2381
2382 while (1) {
2383 signature = readq(&pqi_registers->signature);
2384 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
2385 sizeof(signature)) == 0)
2386 break;
2387 if (time_after(jiffies, timeout)) {
2388 dev_err(&ctrl_info->pci_dev->dev,
2389 "timed out waiting for PQI signature\n");
2390 return -ETIMEDOUT;
2391 }
2392 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2393 }
2394
2395 while (1) {
2396 status = readb(&pqi_registers->function_and_status_code);
2397 if (status == PQI_STATUS_IDLE)
2398 break;
2399 if (time_after(jiffies, timeout)) {
2400 dev_err(&ctrl_info->pci_dev->dev,
2401 "timed out waiting for PQI IDLE\n");
2402 return -ETIMEDOUT;
2403 }
2404 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2405 }
2406
2407 while (1) {
2408 if (readl(&pqi_registers->device_status) ==
2409 PQI_DEVICE_STATE_ALL_REGISTERS_READY)
2410 break;
2411 if (time_after(jiffies, timeout)) {
2412 dev_err(&ctrl_info->pci_dev->dev,
2413 "timed out waiting for PQI all registers ready\n");
2414 return -ETIMEDOUT;
2415 }
2416 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2417 }
2418
2419 return 0;
2420}
2421
2422static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
2423{
2424 struct pqi_scsi_dev *device;
2425
2426 device = io_request->scmd->device->hostdata;
2427 device->offload_enabled = false;
2428}
2429
2430static inline void pqi_take_device_offline(struct scsi_device *sdev)
2431{
2432 struct pqi_ctrl_info *ctrl_info;
Kevin Barnette58081a2016-08-31 14:54:29 -05002433 struct pqi_scsi_dev *device;
Kevin Barnett6c223762016-06-27 16:41:00 -05002434
2435 if (scsi_device_online(sdev)) {
2436 scsi_device_set_state(sdev, SDEV_OFFLINE);
2437 ctrl_info = shost_to_hba(sdev->host);
2438 schedule_delayed_work(&ctrl_info->rescan_work, 0);
Kevin Barnette58081a2016-08-31 14:54:29 -05002439 device = sdev->hostdata;
2440 dev_err(&ctrl_info->pci_dev->dev, "offlined scsi %d:%d:%d:%d\n",
2441 ctrl_info->scsi_host->host_no, device->bus,
2442 device->target, device->lun);
Kevin Barnett6c223762016-06-27 16:41:00 -05002443 }
2444}
2445
2446static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
2447{
2448 u8 scsi_status;
2449 u8 host_byte;
2450 struct scsi_cmnd *scmd;
2451 struct pqi_raid_error_info *error_info;
2452 size_t sense_data_length;
2453 int residual_count;
2454 int xfer_count;
2455 struct scsi_sense_hdr sshdr;
2456
2457 scmd = io_request->scmd;
2458 if (!scmd)
2459 return;
2460
2461 error_info = io_request->error_info;
2462 scsi_status = error_info->status;
2463 host_byte = DID_OK;
2464
2465 if (error_info->data_out_result == PQI_DATA_IN_OUT_UNDERFLOW) {
2466 xfer_count =
2467 get_unaligned_le32(&error_info->data_out_transferred);
2468 residual_count = scsi_bufflen(scmd) - xfer_count;
2469 scsi_set_resid(scmd, residual_count);
2470 if (xfer_count < scmd->underflow)
2471 host_byte = DID_SOFT_ERROR;
2472 }
2473
2474 sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
2475 if (sense_data_length == 0)
2476 sense_data_length =
2477 get_unaligned_le16(&error_info->response_data_length);
2478 if (sense_data_length) {
2479 if (sense_data_length > sizeof(error_info->data))
2480 sense_data_length = sizeof(error_info->data);
2481
2482 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
2483 scsi_normalize_sense(error_info->data,
2484 sense_data_length, &sshdr) &&
2485 sshdr.sense_key == HARDWARE_ERROR &&
2486 sshdr.asc == 0x3e &&
2487 sshdr.ascq == 0x1) {
2488 pqi_take_device_offline(scmd->device);
2489 host_byte = DID_NO_CONNECT;
2490 }
2491
2492 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2493 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2494 memcpy(scmd->sense_buffer, error_info->data,
2495 sense_data_length);
2496 }
2497
2498 scmd->result = scsi_status;
2499 set_host_byte(scmd, host_byte);
2500}
2501
2502static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
2503{
2504 u8 scsi_status;
2505 u8 host_byte;
2506 struct scsi_cmnd *scmd;
2507 struct pqi_aio_error_info *error_info;
2508 size_t sense_data_length;
2509 int residual_count;
2510 int xfer_count;
2511 bool device_offline;
2512
2513 scmd = io_request->scmd;
2514 error_info = io_request->error_info;
2515 host_byte = DID_OK;
2516 sense_data_length = 0;
2517 device_offline = false;
2518
2519 switch (error_info->service_response) {
2520 case PQI_AIO_SERV_RESPONSE_COMPLETE:
2521 scsi_status = error_info->status;
2522 break;
2523 case PQI_AIO_SERV_RESPONSE_FAILURE:
2524 switch (error_info->status) {
2525 case PQI_AIO_STATUS_IO_ABORTED:
2526 scsi_status = SAM_STAT_TASK_ABORTED;
2527 break;
2528 case PQI_AIO_STATUS_UNDERRUN:
2529 scsi_status = SAM_STAT_GOOD;
2530 residual_count = get_unaligned_le32(
2531 &error_info->residual_count);
2532 scsi_set_resid(scmd, residual_count);
2533 xfer_count = scsi_bufflen(scmd) - residual_count;
2534 if (xfer_count < scmd->underflow)
2535 host_byte = DID_SOFT_ERROR;
2536 break;
2537 case PQI_AIO_STATUS_OVERRUN:
2538 scsi_status = SAM_STAT_GOOD;
2539 break;
2540 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
2541 pqi_aio_path_disabled(io_request);
2542 scsi_status = SAM_STAT_GOOD;
2543 io_request->status = -EAGAIN;
2544 break;
2545 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
2546 case PQI_AIO_STATUS_INVALID_DEVICE:
2547 device_offline = true;
2548 pqi_take_device_offline(scmd->device);
2549 host_byte = DID_NO_CONNECT;
2550 scsi_status = SAM_STAT_CHECK_CONDITION;
2551 break;
2552 case PQI_AIO_STATUS_IO_ERROR:
2553 default:
2554 scsi_status = SAM_STAT_CHECK_CONDITION;
2555 break;
2556 }
2557 break;
2558 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
2559 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
2560 scsi_status = SAM_STAT_GOOD;
2561 break;
2562 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
2563 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
2564 default:
2565 scsi_status = SAM_STAT_CHECK_CONDITION;
2566 break;
2567 }
2568
2569 if (error_info->data_present) {
2570 sense_data_length =
2571 get_unaligned_le16(&error_info->data_length);
2572 if (sense_data_length) {
2573 if (sense_data_length > sizeof(error_info->data))
2574 sense_data_length = sizeof(error_info->data);
2575 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2576 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2577 memcpy(scmd->sense_buffer, error_info->data,
2578 sense_data_length);
2579 }
2580 }
2581
2582 if (device_offline && sense_data_length == 0)
2583 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
2584 0x3e, 0x1);
2585
2586 scmd->result = scsi_status;
2587 set_host_byte(scmd, host_byte);
2588}
2589
2590static void pqi_process_io_error(unsigned int iu_type,
2591 struct pqi_io_request *io_request)
2592{
2593 switch (iu_type) {
2594 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2595 pqi_process_raid_io_error(io_request);
2596 break;
2597 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2598 pqi_process_aio_io_error(io_request);
2599 break;
2600 }
2601}
2602
2603static int pqi_interpret_task_management_response(
2604 struct pqi_task_management_response *response)
2605{
2606 int rc;
2607
2608 switch (response->response_code) {
Kevin Barnettb17f0482016-08-31 14:54:17 -05002609 case SOP_TMF_COMPLETE:
2610 case SOP_TMF_FUNCTION_SUCCEEDED:
Kevin Barnett6c223762016-06-27 16:41:00 -05002611 rc = 0;
2612 break;
2613 default:
2614 rc = -EIO;
2615 break;
2616 }
2617
2618 return rc;
2619}
2620
2621static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
2622 struct pqi_queue_group *queue_group)
2623{
2624 unsigned int num_responses;
2625 pqi_index_t oq_pi;
2626 pqi_index_t oq_ci;
2627 struct pqi_io_request *io_request;
2628 struct pqi_io_response *response;
2629 u16 request_id;
2630
2631 num_responses = 0;
2632 oq_ci = queue_group->oq_ci_copy;
2633
2634 while (1) {
2635 oq_pi = *queue_group->oq_pi;
2636 if (oq_pi == oq_ci)
2637 break;
2638
2639 num_responses++;
2640 response = queue_group->oq_element_array +
2641 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
2642
2643 request_id = get_unaligned_le16(&response->request_id);
2644 WARN_ON(request_id >= ctrl_info->max_io_slots);
2645
2646 io_request = &ctrl_info->io_request_pool[request_id];
2647 WARN_ON(atomic_read(&io_request->refcount) == 0);
2648
2649 switch (response->header.iu_type) {
2650 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
2651 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
2652 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
2653 break;
2654 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
2655 io_request->status =
2656 pqi_interpret_task_management_response(
2657 (void *)response);
2658 break;
2659 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
2660 pqi_aio_path_disabled(io_request);
2661 io_request->status = -EAGAIN;
2662 break;
2663 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2664 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2665 io_request->error_info = ctrl_info->error_buffer +
2666 (get_unaligned_le16(&response->error_index) *
2667 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
2668 pqi_process_io_error(response->header.iu_type,
2669 io_request);
2670 break;
2671 default:
2672 dev_err(&ctrl_info->pci_dev->dev,
2673 "unexpected IU type: 0x%x\n",
2674 response->header.iu_type);
2675 WARN_ON(response->header.iu_type);
2676 break;
2677 }
2678
2679 io_request->io_complete_callback(io_request,
2680 io_request->context);
2681
2682 /*
2683 * Note that the I/O request structure CANNOT BE TOUCHED after
2684 * returning from the I/O completion callback!
2685 */
2686
2687 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
2688 }
2689
2690 if (num_responses) {
2691 queue_group->oq_ci_copy = oq_ci;
2692 writel(oq_ci, queue_group->oq_ci);
2693 }
2694
2695 return num_responses;
2696}
2697
2698static inline unsigned int pqi_num_elements_free(unsigned int pi,
Kevin Barnettdf7a1fc2016-08-31 14:54:59 -05002699 unsigned int ci, unsigned int elements_in_queue)
Kevin Barnett6c223762016-06-27 16:41:00 -05002700{
2701 unsigned int num_elements_used;
2702
2703 if (pi >= ci)
2704 num_elements_used = pi - ci;
2705 else
2706 num_elements_used = elements_in_queue - ci + pi;
2707
2708 return elements_in_queue - num_elements_used - 1;
2709}
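/*
 * Example for pqi_num_elements_free() (hypothetical values): with
 * elements_in_queue = 8, pi = 5 and ci = 2, three elements are in use,
 * so 8 - 3 - 1 = 4 are free.  One slot is always left unused so that
 * pi == ci unambiguously means "queue empty" rather than "queue full".
 */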
2710
2711#define PQI_EVENT_ACK_TIMEOUT	30	/* seconds */
2712
2713static void pqi_start_event_ack(struct pqi_ctrl_info *ctrl_info,
2714 struct pqi_event_acknowledge_request *iu, size_t iu_length)
2715{
2716 pqi_index_t iq_pi;
2717 pqi_index_t iq_ci;
2718 unsigned long flags;
2719 void *next_element;
2720 unsigned long timeout;
2721 struct pqi_queue_group *queue_group;
2722
2723 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
2724 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
2725
2726 timeout = (PQI_EVENT_ACK_TIMEOUT * HZ) + jiffies;
2727
2728 while (1) {
2729 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
2730
2731 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
2732 iq_ci = *queue_group->iq_ci[RAID_PATH];
2733
2734 if (pqi_num_elements_free(iq_pi, iq_ci,
2735 ctrl_info->num_elements_per_iq))
2736 break;
2737
2738 spin_unlock_irqrestore(
2739 &queue_group->submit_lock[RAID_PATH], flags);
2740
2741 if (time_after(jiffies, timeout)) {
2742 dev_err(&ctrl_info->pci_dev->dev,
2743 "sending event acknowledge timed out\n");
2744 return;
2745 }
2746 }
2747
2748 next_element = queue_group->iq_element_array[RAID_PATH] +
2749 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
2750
2751 memcpy(next_element, iu, iu_length);
2752
2753 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
2754
2755 queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
2756
2757 /*
2758 * This write notifies the controller that an IU is available to be
2759 * processed.
2760 */
2761 writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
2762
2763 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
Kevin Barnett6c223762016-06-27 16:41:00 -05002764}
2765
2766static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
2767 struct pqi_event *event)
2768{
2769 struct pqi_event_acknowledge_request request;
2770
2771 memset(&request, 0, sizeof(request));
2772
2773 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
2774 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
2775 &request.header.iu_length);
2776 request.event_type = event->event_type;
2777 request.event_id = event->event_id;
2778 request.additional_event_id = event->additional_event_id;
2779
2780 pqi_start_event_ack(ctrl_info, &request, sizeof(request));
2781}
2782
2783static void pqi_event_worker(struct work_struct *work)
2784{
2785 unsigned int i;
2786 struct pqi_ctrl_info *ctrl_info;
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002787 struct pqi_event *event;
Kevin Barnett6c223762016-06-27 16:41:00 -05002788 bool got_non_heartbeat_event = false;
2789
2790 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
2791
Kevin Barnett7561a7e2017-05-03 18:52:58 -05002792 pqi_ctrl_busy(ctrl_info);
2793 pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT);
2794
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002795 event = ctrl_info->events;
Kevin Barnett6c223762016-06-27 16:41:00 -05002796 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002797 if (event->pending) {
2798 event->pending = false;
2799 pqi_acknowledge_event(ctrl_info, event);
2800 if (i != PQI_EVENT_TYPE_HEARTBEAT)
Kevin Barnett6c223762016-06-27 16:41:00 -05002801 got_non_heartbeat_event = true;
2802 }
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002803 event++;
Kevin Barnett6c223762016-06-27 16:41:00 -05002804 }
2805
Kevin Barnett7561a7e2017-05-03 18:52:58 -05002806 pqi_ctrl_unbusy(ctrl_info);
2807
2808 pqi_schedule_rescan_worker(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05002809}
2810
2811static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
2812{
2813 unsigned int i;
2814 unsigned int path;
2815 struct pqi_queue_group *queue_group;
2816 unsigned long flags;
2817 struct pqi_io_request *io_request;
2818 struct pqi_io_request *next;
2819 struct scsi_cmnd *scmd;
2820
2821 ctrl_info->controller_online = false;
2822 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
Kevin Barnett5b0fba02017-05-03 18:52:40 -05002823 sis_shutdown_ctrl(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05002824
2825 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
2826 queue_group = &ctrl_info->queue_groups[i];
2827
2828 for (path = 0; path < 2; path++) {
2829 spin_lock_irqsave(
2830 &queue_group->submit_lock[path], flags);
2831
2832 list_for_each_entry_safe(io_request, next,
2833 &queue_group->request_list[path],
2834 request_list_entry) {
2835
2836 scmd = io_request->scmd;
2837 if (scmd) {
2838 set_host_byte(scmd, DID_NO_CONNECT);
2839 pqi_scsi_done(scmd);
2840 }
2841
2842 list_del(&io_request->request_list_entry);
2843 }
2844
2845 spin_unlock_irqrestore(
2846 &queue_group->submit_lock[path], flags);
2847 }
2848 }
2849}
2850
2851#define PQI_HEARTBEAT_TIMER_INTERVAL (5 * HZ)
2852#define PQI_MAX_HEARTBEAT_REQUESTS 5
2853
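/*
 * The heartbeat timer fires every PQI_HEARTBEAT_TIMER_INTERVAL.  If the
 * interrupt count has not advanced since the previous run, the heartbeat
 * event is marked pending and the event worker is scheduled to
 * acknowledge it; once more than PQI_MAX_HEARTBEAT_REQUESTS consecutive
 * runs pass with no interrupt activity, the controller is taken offline.
 */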
2854static void pqi_heartbeat_timer_handler(unsigned long data)
2855{
2856 int num_interrupts;
2857 struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data;
2858
Kevin Barnett061ef062017-05-03 18:53:05 -05002859 if (!ctrl_info->heartbeat_timer_started)
2860 return;
2861
Kevin Barnett6c223762016-06-27 16:41:00 -05002862 num_interrupts = atomic_read(&ctrl_info->num_interrupts);
2863
2864 if (num_interrupts == ctrl_info->previous_num_interrupts) {
2865 ctrl_info->num_heartbeats_requested++;
2866 if (ctrl_info->num_heartbeats_requested >
2867 PQI_MAX_HEARTBEAT_REQUESTS) {
2868 pqi_take_ctrl_offline(ctrl_info);
2869 return;
2870 }
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002871 ctrl_info->events[PQI_EVENT_HEARTBEAT].pending = true;
Kevin Barnett6c223762016-06-27 16:41:00 -05002872 schedule_work(&ctrl_info->event_work);
2873 } else {
2874 ctrl_info->num_heartbeats_requested = 0;
2875 }
2876
2877 ctrl_info->previous_num_interrupts = num_interrupts;
2878 mod_timer(&ctrl_info->heartbeat_timer,
2879 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
2880}
2881
2882static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2883{
2884 ctrl_info->previous_num_interrupts =
2885 atomic_read(&ctrl_info->num_interrupts);
2886
2887 init_timer(&ctrl_info->heartbeat_timer);
2888 ctrl_info->heartbeat_timer.expires =
2889 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
2890 ctrl_info->heartbeat_timer.data = (unsigned long)ctrl_info;
2891 ctrl_info->heartbeat_timer.function = pqi_heartbeat_timer_handler;
Kevin Barnett6c223762016-06-27 16:41:00 -05002892 ctrl_info->heartbeat_timer_started = true;
Kevin Barnett061ef062017-05-03 18:53:05 -05002893 add_timer(&ctrl_info->heartbeat_timer);
Kevin Barnett6c223762016-06-27 16:41:00 -05002894}
2895
2896static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2897{
Kevin Barnett061ef062017-05-03 18:53:05 -05002898 if (ctrl_info->heartbeat_timer_started) {
2899 ctrl_info->heartbeat_timer_started = false;
Kevin Barnett6c223762016-06-27 16:41:00 -05002900 del_timer_sync(&ctrl_info->heartbeat_timer);
Kevin Barnett061ef062017-05-03 18:53:05 -05002901 }
Kevin Barnett6c223762016-06-27 16:41:00 -05002902}
2903
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002904static inline int pqi_event_type_to_event_index(unsigned int event_type)
Kevin Barnett6c223762016-06-27 16:41:00 -05002905{
2906 int index;
2907
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002908 for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
2909 if (event_type == pqi_supported_event_types[index])
2910 return index;
Kevin Barnett6c223762016-06-27 16:41:00 -05002911
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002912 return -1;
2913}
2914
2915static inline bool pqi_is_supported_event(unsigned int event_type)
2916{
2917 return pqi_event_type_to_event_index(event_type) != -1;
Kevin Barnett6c223762016-06-27 16:41:00 -05002918}
2919
2920static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
2921{
2922 unsigned int num_events;
2923 pqi_index_t oq_pi;
2924 pqi_index_t oq_ci;
2925 struct pqi_event_queue *event_queue;
2926 struct pqi_event_response *response;
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002927 struct pqi_event *event;
Kevin Barnett6c223762016-06-27 16:41:00 -05002928 bool need_delayed_work;
2929 int event_index;
2930
2931 event_queue = &ctrl_info->event_queue;
2932 num_events = 0;
2933 need_delayed_work = false;
2934 oq_ci = event_queue->oq_ci_copy;
2935
2936 while (1) {
2937 oq_pi = *event_queue->oq_pi;
2938 if (oq_pi == oq_ci)
2939 break;
2940
2941 num_events++;
2942 response = event_queue->oq_element_array +
2943 (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
2944
2945 event_index =
2946 pqi_event_type_to_event_index(response->event_type);
2947
2948 if (event_index >= 0) {
2949 if (response->request_acknowlege) {
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002950 event = &ctrl_info->events[event_index];
2951 event->pending = true;
2952 event->event_type = response->event_type;
2953 event->event_id = response->event_id;
2954 event->additional_event_id =
Kevin Barnett6c223762016-06-27 16:41:00 -05002955 response->additional_event_id;
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002956 if (event_index != PQI_EVENT_TYPE_HEARTBEAT) {
2957 event->pending = true;
Kevin Barnett6c223762016-06-27 16:41:00 -05002958 need_delayed_work = true;
2959 }
2960 }
2961 }
2962
2963 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
2964 }
2965
2966 if (num_events) {
2967 event_queue->oq_ci_copy = oq_ci;
2968 writel(oq_ci, event_queue->oq_ci);
2969
2970 if (need_delayed_work)
2971 schedule_work(&ctrl_info->event_work);
2972 }
2973
2974 return num_events;
2975}
2976
Kevin Barnett061ef062017-05-03 18:53:05 -05002977#define PQI_LEGACY_INTX_MASK 0x1
2978
2979static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info,
2980 bool enable_intx)
2981{
2982 u32 intx_mask;
2983 struct pqi_device_registers __iomem *pqi_registers;
2984 volatile void __iomem *register_addr;
2985
2986 pqi_registers = ctrl_info->pqi_registers;
2987
2988 if (enable_intx)
2989 register_addr = &pqi_registers->legacy_intx_mask_clear;
2990 else
2991 register_addr = &pqi_registers->legacy_intx_mask_set;
2992
2993 intx_mask = readl(register_addr);
2994 intx_mask |= PQI_LEGACY_INTX_MASK;
2995 writel(intx_mask, register_addr);
2996}
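/*
 * The legacy INTx mask is driven through a pair of registers: setting
 * PQI_LEGACY_INTX_MASK in legacy_intx_mask_clear unmasks (enables) INTx,
 * while setting it in legacy_intx_mask_set masks it again, which is why
 * the same bit value is written to a different register depending on
 * enable_intx.
 */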
2997
2998static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
2999 enum pqi_irq_mode new_mode)
3000{
3001 switch (ctrl_info->irq_mode) {
3002 case IRQ_MODE_MSIX:
3003 switch (new_mode) {
3004 case IRQ_MODE_MSIX:
3005 break;
3006 case IRQ_MODE_INTX:
3007 pqi_configure_legacy_intx(ctrl_info, true);
3008 sis_disable_msix(ctrl_info);
3009 sis_enable_intx(ctrl_info);
3010 break;
3011 case IRQ_MODE_NONE:
3012 sis_disable_msix(ctrl_info);
3013 break;
3014 }
3015 break;
3016 case IRQ_MODE_INTX:
3017 switch (new_mode) {
3018 case IRQ_MODE_MSIX:
3019 pqi_configure_legacy_intx(ctrl_info, false);
3020 sis_disable_intx(ctrl_info);
3021 sis_enable_msix(ctrl_info);
3022 break;
3023 case IRQ_MODE_INTX:
3024 break;
3025 case IRQ_MODE_NONE:
3026 pqi_configure_legacy_intx(ctrl_info, false);
3027 sis_disable_intx(ctrl_info);
3028 break;
3029 }
3030 break;
3031 case IRQ_MODE_NONE:
3032 switch (new_mode) {
3033 case IRQ_MODE_MSIX:
3034 sis_enable_msix(ctrl_info);
3035 break;
3036 case IRQ_MODE_INTX:
3037 pqi_configure_legacy_intx(ctrl_info, true);
3038 sis_enable_intx(ctrl_info);
3039 break;
3040 case IRQ_MODE_NONE:
3041 break;
3042 }
3043 break;
3044 }
3045
3046 ctrl_info->irq_mode = new_mode;
3047}
3048
3049#define PQI_LEGACY_INTX_PENDING 0x1
3050
3051static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
3052{
3053 bool valid_irq;
3054 u32 intx_status;
3055
3056 switch (ctrl_info->irq_mode) {
3057 case IRQ_MODE_MSIX:
3058 valid_irq = true;
3059 break;
3060 case IRQ_MODE_INTX:
3061 intx_status =
3062 readl(&ctrl_info->pqi_registers->legacy_intx_status);
3063 if (intx_status & PQI_LEGACY_INTX_PENDING)
3064 valid_irq = true;
3065 else
3066 valid_irq = false;
3067 break;
3068 case IRQ_MODE_NONE:
3069 default:
3070 valid_irq = false;
3071 break;
3072 }
3073
3074 return valid_irq;
3075}
3076
Kevin Barnett6c223762016-06-27 16:41:00 -05003077static irqreturn_t pqi_irq_handler(int irq, void *data)
3078{
3079 struct pqi_ctrl_info *ctrl_info;
3080 struct pqi_queue_group *queue_group;
3081 unsigned int num_responses_handled;
3082
3083 queue_group = data;
3084 ctrl_info = queue_group->ctrl_info;
3085
Kevin Barnett061ef062017-05-03 18:53:05 -05003086 if (!pqi_is_valid_irq(ctrl_info))
Kevin Barnett6c223762016-06-27 16:41:00 -05003087 return IRQ_NONE;
3088
3089 num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
3090
3091 if (irq == ctrl_info->event_irq)
3092 num_responses_handled += pqi_process_event_intr(ctrl_info);
3093
3094 if (num_responses_handled)
3095 atomic_inc(&ctrl_info->num_interrupts);
3096
3097 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
3098 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
3099
3100 return IRQ_HANDLED;
3101}
3102
3103static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
3104{
Christoph Hellwig52198222016-11-01 08:12:49 -06003105 struct pci_dev *pdev = ctrl_info->pci_dev;
Kevin Barnett6c223762016-06-27 16:41:00 -05003106 int i;
3107 int rc;
3108
Christoph Hellwig52198222016-11-01 08:12:49 -06003109 ctrl_info->event_irq = pci_irq_vector(pdev, 0);
Kevin Barnett6c223762016-06-27 16:41:00 -05003110
3111 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
Christoph Hellwig52198222016-11-01 08:12:49 -06003112 rc = request_irq(pci_irq_vector(pdev, i), pqi_irq_handler, 0,
3113 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
Kevin Barnett6c223762016-06-27 16:41:00 -05003114 if (rc) {
Christoph Hellwig52198222016-11-01 08:12:49 -06003115 dev_err(&pdev->dev,
Kevin Barnett6c223762016-06-27 16:41:00 -05003116 "irq %u init failed with error %d\n",
Christoph Hellwig52198222016-11-01 08:12:49 -06003117 pci_irq_vector(pdev, i), rc);
Kevin Barnett6c223762016-06-27 16:41:00 -05003118 return rc;
3119 }
3120 ctrl_info->num_msix_vectors_initialized++;
3121 }
3122
3123 return 0;
3124}
3125
Kevin Barnett98bf0612017-05-03 18:52:28 -05003126static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
3127{
3128 int i;
3129
3130 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
3131 free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
3132 &ctrl_info->queue_groups[i]);
3133
3134 ctrl_info->num_msix_vectors_initialized = 0;
3135}
3136
Kevin Barnett6c223762016-06-27 16:41:00 -05003137static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3138{
Kevin Barnett98bf0612017-05-03 18:52:28 -05003139 int num_vectors_enabled;
Kevin Barnett6c223762016-06-27 16:41:00 -05003140
Kevin Barnett98bf0612017-05-03 18:52:28 -05003141 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
Christoph Hellwig52198222016-11-01 08:12:49 -06003142 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
3143 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
Kevin Barnett98bf0612017-05-03 18:52:28 -05003144 if (num_vectors_enabled < 0) {
Kevin Barnett6c223762016-06-27 16:41:00 -05003145 dev_err(&ctrl_info->pci_dev->dev,
Kevin Barnett98bf0612017-05-03 18:52:28 -05003146 "MSI-X init failed with error %d\n",
3147 num_vectors_enabled);
3148 return num_vectors_enabled;
Kevin Barnett6c223762016-06-27 16:41:00 -05003149 }
3150
Kevin Barnett98bf0612017-05-03 18:52:28 -05003151 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
Kevin Barnett061ef062017-05-03 18:53:05 -05003152 ctrl_info->irq_mode = IRQ_MODE_MSIX;
Kevin Barnett6c223762016-06-27 16:41:00 -05003153 return 0;
3154}
3155
Kevin Barnett98bf0612017-05-03 18:52:28 -05003156static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3157{
3158 if (ctrl_info->num_msix_vectors_enabled) {
3159 pci_free_irq_vectors(ctrl_info->pci_dev);
3160 ctrl_info->num_msix_vectors_enabled = 0;
3161 }
3162}
3163
Kevin Barnett6c223762016-06-27 16:41:00 -05003164static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
3165{
3166 unsigned int i;
3167 size_t alloc_length;
3168 size_t element_array_length_per_iq;
3169 size_t element_array_length_per_oq;
3170 void *element_array;
3171 void *next_queue_index;
3172 void *aligned_pointer;
3173 unsigned int num_inbound_queues;
3174 unsigned int num_outbound_queues;
3175 unsigned int num_queue_indexes;
3176 struct pqi_queue_group *queue_group;
3177
3178 element_array_length_per_iq =
3179 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
3180 ctrl_info->num_elements_per_iq;
3181 element_array_length_per_oq =
3182 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
3183 ctrl_info->num_elements_per_oq;
3184 num_inbound_queues = ctrl_info->num_queue_groups * 2;
3185 num_outbound_queues = ctrl_info->num_queue_groups;
3186 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
3187
3188 aligned_pointer = NULL;
3189
3190 for (i = 0; i < num_inbound_queues; i++) {
3191 aligned_pointer = PTR_ALIGN(aligned_pointer,
3192 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3193 aligned_pointer += element_array_length_per_iq;
3194 }
3195
3196 for (i = 0; i < num_outbound_queues; i++) {
3197 aligned_pointer = PTR_ALIGN(aligned_pointer,
3198 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3199 aligned_pointer += element_array_length_per_oq;
3200 }
3201
3202 aligned_pointer = PTR_ALIGN(aligned_pointer,
3203 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3204 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3205 PQI_EVENT_OQ_ELEMENT_LENGTH;
3206
3207 for (i = 0; i < num_queue_indexes; i++) {
3208 aligned_pointer = PTR_ALIGN(aligned_pointer,
3209 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3210 aligned_pointer += sizeof(pqi_index_t);
3211 }
3212
3213 alloc_length = (size_t)aligned_pointer +
3214 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3215
3216 ctrl_info->queue_memory_base =
3217 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3218 alloc_length,
3219 &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);
3220
3221 if (!ctrl_info->queue_memory_base) {
3222 dev_err(&ctrl_info->pci_dev->dev,
3223 "failed to allocate memory for PQI admin queues\n");
3224 return -ENOMEM;
3225 }
3226
3227 ctrl_info->queue_memory_length = alloc_length;
3228
3229 element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
3230 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3231
3232 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3233 queue_group = &ctrl_info->queue_groups[i];
3234 queue_group->iq_element_array[RAID_PATH] = element_array;
3235 queue_group->iq_element_array_bus_addr[RAID_PATH] =
3236 ctrl_info->queue_memory_base_dma_handle +
3237 (element_array - ctrl_info->queue_memory_base);
3238 element_array += element_array_length_per_iq;
3239 element_array = PTR_ALIGN(element_array,
3240 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3241 queue_group->iq_element_array[AIO_PATH] = element_array;
3242 queue_group->iq_element_array_bus_addr[AIO_PATH] =
3243 ctrl_info->queue_memory_base_dma_handle +
3244 (element_array - ctrl_info->queue_memory_base);
3245 element_array += element_array_length_per_iq;
3246 element_array = PTR_ALIGN(element_array,
3247 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3248 }
3249
3250 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3251 queue_group = &ctrl_info->queue_groups[i];
3252 queue_group->oq_element_array = element_array;
3253 queue_group->oq_element_array_bus_addr =
3254 ctrl_info->queue_memory_base_dma_handle +
3255 (element_array - ctrl_info->queue_memory_base);
3256 element_array += element_array_length_per_oq;
3257 element_array = PTR_ALIGN(element_array,
3258 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3259 }
3260
3261 ctrl_info->event_queue.oq_element_array = element_array;
3262 ctrl_info->event_queue.oq_element_array_bus_addr =
3263 ctrl_info->queue_memory_base_dma_handle +
3264 (element_array - ctrl_info->queue_memory_base);
3265 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3266 PQI_EVENT_OQ_ELEMENT_LENGTH;
3267
3268 next_queue_index = PTR_ALIGN(element_array,
3269 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3270
3271 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3272 queue_group = &ctrl_info->queue_groups[i];
3273 queue_group->iq_ci[RAID_PATH] = next_queue_index;
3274 queue_group->iq_ci_bus_addr[RAID_PATH] =
3275 ctrl_info->queue_memory_base_dma_handle +
3276 (next_queue_index - ctrl_info->queue_memory_base);
3277 next_queue_index += sizeof(pqi_index_t);
3278 next_queue_index = PTR_ALIGN(next_queue_index,
3279 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3280 queue_group->iq_ci[AIO_PATH] = next_queue_index;
3281 queue_group->iq_ci_bus_addr[AIO_PATH] =
3282 ctrl_info->queue_memory_base_dma_handle +
3283 (next_queue_index - ctrl_info->queue_memory_base);
3284 next_queue_index += sizeof(pqi_index_t);
3285 next_queue_index = PTR_ALIGN(next_queue_index,
3286 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3287 queue_group->oq_pi = next_queue_index;
3288 queue_group->oq_pi_bus_addr =
3289 ctrl_info->queue_memory_base_dma_handle +
3290 (next_queue_index - ctrl_info->queue_memory_base);
3291 next_queue_index += sizeof(pqi_index_t);
3292 next_queue_index = PTR_ALIGN(next_queue_index,
3293 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3294 }
3295
3296 ctrl_info->event_queue.oq_pi = next_queue_index;
3297 ctrl_info->event_queue.oq_pi_bus_addr =
3298 ctrl_info->queue_memory_base_dma_handle +
3299 (next_queue_index - ctrl_info->queue_memory_base);
3300
3301 return 0;
3302}
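/*
 * Layout of the single coherent buffer set up above, in allocation order:
 * the RAID and AIO inbound element arrays for each queue group, the
 * outbound element array for each queue group, the event queue element
 * array, and finally the queue index words (iq_ci for both paths and
 * oq_pi per group, plus the event queue oq_pi).  The size is computed
 * with the same PTR_ALIGN() walk that later assigns the pointers, so
 * every piece keeps its required element-array or index alignment.
 */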
3303
3304static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
3305{
3306 unsigned int i;
3307 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3308 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3309
3310 /*
3311 * Initialize the backpointers to the controller structure in
3312 * each operational queue group structure.
3313 */
3314 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3315 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
3316
3317 /*
3318 * Assign IDs to all operational queues. Note that the IDs
3319 * assigned to operational IQs are independent of the IDs
3320 * assigned to operational OQs.
3321 */
3322 ctrl_info->event_queue.oq_id = next_oq_id++;
3323 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3324 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
3325 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
3326 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
3327 }
3328
3329 /*
3330 * Assign MSI-X table entry indexes to all queues. Note that the
3331 * interrupt for the event queue is shared with the first queue group.
3332 */
3333 ctrl_info->event_queue.int_msg_num = 0;
3334 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3335 ctrl_info->queue_groups[i].int_msg_num = i;
3336
3337 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3338 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
3339 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
3340 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
3341 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
3342 }
3343}
3344
3345static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
3346{
3347 size_t alloc_length;
3348 struct pqi_admin_queues_aligned *admin_queues_aligned;
3349 struct pqi_admin_queues *admin_queues;
3350
3351 alloc_length = sizeof(struct pqi_admin_queues_aligned) +
3352 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3353
3354 ctrl_info->admin_queue_memory_base =
3355 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3356 alloc_length,
3357 &ctrl_info->admin_queue_memory_base_dma_handle,
3358 GFP_KERNEL);
3359
3360 if (!ctrl_info->admin_queue_memory_base)
3361 return -ENOMEM;
3362
3363 ctrl_info->admin_queue_memory_length = alloc_length;
3364
3365 admin_queues = &ctrl_info->admin_queues;
3366 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
3367 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3368 admin_queues->iq_element_array =
3369 &admin_queues_aligned->iq_element_array;
3370 admin_queues->oq_element_array =
3371 &admin_queues_aligned->oq_element_array;
3372 admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
3373 admin_queues->oq_pi = &admin_queues_aligned->oq_pi;
3374
3375 admin_queues->iq_element_array_bus_addr =
3376 ctrl_info->admin_queue_memory_base_dma_handle +
3377 (admin_queues->iq_element_array -
3378 ctrl_info->admin_queue_memory_base);
3379 admin_queues->oq_element_array_bus_addr =
3380 ctrl_info->admin_queue_memory_base_dma_handle +
3381 (admin_queues->oq_element_array -
3382 ctrl_info->admin_queue_memory_base);
3383 admin_queues->iq_ci_bus_addr =
3384 ctrl_info->admin_queue_memory_base_dma_handle +
3385 ((void *)admin_queues->iq_ci -
3386 ctrl_info->admin_queue_memory_base);
3387 admin_queues->oq_pi_bus_addr =
3388 ctrl_info->admin_queue_memory_base_dma_handle +
3389 ((void *)admin_queues->oq_pi -
3390 ctrl_info->admin_queue_memory_base);
3391
3392 return 0;
3393}
3394
3395#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ
3396#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
3397
3398static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
3399{
3400 struct pqi_device_registers __iomem *pqi_registers;
3401 struct pqi_admin_queues *admin_queues;
3402 unsigned long timeout;
3403 u8 status;
3404 u32 reg;
3405
3406 pqi_registers = ctrl_info->pqi_registers;
3407 admin_queues = &ctrl_info->admin_queues;
3408
3409 writeq((u64)admin_queues->iq_element_array_bus_addr,
3410 &pqi_registers->admin_iq_element_array_addr);
3411 writeq((u64)admin_queues->oq_element_array_bus_addr,
3412 &pqi_registers->admin_oq_element_array_addr);
3413 writeq((u64)admin_queues->iq_ci_bus_addr,
3414 &pqi_registers->admin_iq_ci_addr);
3415 writeq((u64)admin_queues->oq_pi_bus_addr,
3416 &pqi_registers->admin_oq_pi_addr);
3417
3418 reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
3419 (PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 |
3420 (admin_queues->int_msg_num << 16);
3421 writel(reg, &pqi_registers->admin_iq_num_elements);
3422 writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
3423 &pqi_registers->function_and_status_code);
3424
3425 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
3426 while (1) {
3427 status = readb(&pqi_registers->function_and_status_code);
3428 if (status == PQI_STATUS_IDLE)
3429 break;
3430 if (time_after(jiffies, timeout))
3431 return -ETIMEDOUT;
3432 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
3433 }
3434
3435 /*
3436 * The offset registers are not initialized to the correct
3437 * offsets until *after* the create admin queue pair command
3438 * completes successfully.
3439 */
3440 admin_queues->iq_pi = ctrl_info->iomem_base +
3441 PQI_DEVICE_REGISTERS_OFFSET +
3442 readq(&pqi_registers->admin_iq_pi_offset);
3443 admin_queues->oq_ci = ctrl_info->iomem_base +
3444 PQI_DEVICE_REGISTERS_OFFSET +
3445 readq(&pqi_registers->admin_oq_ci_offset);
3446
3447 return 0;
3448}
3449
3450static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
3451 struct pqi_general_admin_request *request)
3452{
3453 struct pqi_admin_queues *admin_queues;
3454 void *next_element;
3455 pqi_index_t iq_pi;
3456
3457 admin_queues = &ctrl_info->admin_queues;
3458 iq_pi = admin_queues->iq_pi_copy;
3459
3460 next_element = admin_queues->iq_element_array +
3461 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
3462
3463 memcpy(next_element, request, sizeof(*request));
3464
3465 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
3466 admin_queues->iq_pi_copy = iq_pi;
3467
3468 /*
3469 * This write notifies the controller that an IU is available to be
3470 * processed.
3471 */
3472 writel(iq_pi, admin_queues->iq_pi);
3473}
3474
3475static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
3476 struct pqi_general_admin_response *response)
3477{
3478 struct pqi_admin_queues *admin_queues;
3479 pqi_index_t oq_pi;
3480 pqi_index_t oq_ci;
3481 unsigned long timeout;
3482
3483 admin_queues = &ctrl_info->admin_queues;
3484 oq_ci = admin_queues->oq_ci_copy;
3485
3486 timeout = (3 * HZ) + jiffies;
3487
3488 while (1) {
3489 oq_pi = *admin_queues->oq_pi;
3490 if (oq_pi != oq_ci)
3491 break;
3492 if (time_after(jiffies, timeout)) {
3493 dev_err(&ctrl_info->pci_dev->dev,
3494 "timed out waiting for admin response\n");
3495 return -ETIMEDOUT;
3496 }
3497 usleep_range(1000, 2000);
3498 }
3499
3500 memcpy(response, admin_queues->oq_element_array +
3501 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
3502
3503 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
3504 admin_queues->oq_ci_copy = oq_ci;
3505 writel(oq_ci, admin_queues->oq_ci);
3506
3507 return 0;
3508}
3509
3510static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
3511 struct pqi_queue_group *queue_group, enum pqi_io_path path,
3512 struct pqi_io_request *io_request)
3513{
3514 struct pqi_io_request *next;
3515 void *next_element;
3516 pqi_index_t iq_pi;
3517 pqi_index_t iq_ci;
3518 size_t iu_length;
3519 unsigned long flags;
3520 unsigned int num_elements_needed;
3521 unsigned int num_elements_to_end_of_queue;
3522 size_t copy_count;
3523 struct pqi_iu_header *request;
3524
3525 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
3526
3527 if (io_request)
3528 list_add_tail(&io_request->request_list_entry,
3529 &queue_group->request_list[path]);
3530
3531 iq_pi = queue_group->iq_pi_copy[path];
3532
3533 list_for_each_entry_safe(io_request, next,
3534 &queue_group->request_list[path], request_list_entry) {
3535
3536 request = io_request->iu;
3537
3538 iu_length = get_unaligned_le16(&request->iu_length) +
3539 PQI_REQUEST_HEADER_LENGTH;
3540 num_elements_needed =
3541 DIV_ROUND_UP(iu_length,
3542 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3543
3544 iq_ci = *queue_group->iq_ci[path];
3545
3546 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
3547 ctrl_info->num_elements_per_iq))
3548 break;
3549
3550 put_unaligned_le16(queue_group->oq_id,
3551 &request->response_queue_id);
3552
3553 next_element = queue_group->iq_element_array[path] +
3554 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3555
3556 num_elements_to_end_of_queue =
3557 ctrl_info->num_elements_per_iq - iq_pi;
3558
3559 if (num_elements_needed <= num_elements_to_end_of_queue) {
3560 memcpy(next_element, request, iu_length);
3561 } else {
3562 copy_count = num_elements_to_end_of_queue *
3563 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
3564 memcpy(next_element, request, copy_count);
3565 memcpy(queue_group->iq_element_array[path],
3566 (u8 *)request + copy_count,
3567 iu_length - copy_count);
3568 }
3569
3570 iq_pi = (iq_pi + num_elements_needed) %
3571 ctrl_info->num_elements_per_iq;
3572
3573 list_del(&io_request->request_list_entry);
3574 }
3575
3576 if (iq_pi != queue_group->iq_pi_copy[path]) {
3577 queue_group->iq_pi_copy[path] = iq_pi;
3578 /*
3579 * This write notifies the controller that one or more IUs are
3580 * available to be processed.
3581 */
3582 writel(iq_pi, queue_group->iq_pi[path]);
3583 }
3584
3585 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
3586}
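/*
 * Example of the wraparound copy in pqi_start_io() (hypothetical values):
 * with num_elements_per_iq = 16 and iq_pi = 14, an IU needing 4 elements
 * is copied as two pieces - two elements' worth of bytes at the end of
 * the element array and the remainder starting at element 0 - after
 * which iq_pi advances to (14 + 4) % 16 = 2.
 */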
3587
3588static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
3589 void *context)
3590{
3591 struct completion *waiting = context;
3592
3593 complete(waiting);
3594}
3595
3596static int pqi_submit_raid_request_synchronous_with_io_request(
3597 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
3598 unsigned long timeout_msecs)
3599{
3600 int rc = 0;
3601 DECLARE_COMPLETION_ONSTACK(wait);
3602
3603 io_request->io_complete_callback = pqi_raid_synchronous_complete;
3604 io_request->context = &wait;
3605
3606 pqi_start_io(ctrl_info,
3607 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
3608 io_request);
3609
3610 if (timeout_msecs == NO_TIMEOUT) {
3611 wait_for_completion_io(&wait);
3612 } else {
3613 if (!wait_for_completion_io_timeout(&wait,
3614 msecs_to_jiffies(timeout_msecs))) {
3615 dev_warn(&ctrl_info->pci_dev->dev,
3616 "command timed out\n");
3617 rc = -ETIMEDOUT;
3618 }
3619 }
3620
3621 return rc;
3622}
3623
3624static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
3625 struct pqi_iu_header *request, unsigned int flags,
3626 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
3627{
3628 int rc;
3629 struct pqi_io_request *io_request;
3630 unsigned long start_jiffies;
3631 unsigned long msecs_blocked;
3632 size_t iu_length;
3633
3634 /*
3635 * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value
3636 * are mutually exclusive.
3637 */
3638
3639 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
3640 if (down_interruptible(&ctrl_info->sync_request_sem))
3641 return -ERESTARTSYS;
3642 } else {
3643 if (timeout_msecs == NO_TIMEOUT) {
3644 down(&ctrl_info->sync_request_sem);
3645 } else {
3646 start_jiffies = jiffies;
3647 if (down_timeout(&ctrl_info->sync_request_sem,
3648 msecs_to_jiffies(timeout_msecs)))
3649 return -ETIMEDOUT;
3650 msecs_blocked =
3651 jiffies_to_msecs(jiffies - start_jiffies);
3652 if (msecs_blocked >= timeout_msecs)
3653 return -ETIMEDOUT;
3654 timeout_msecs -= msecs_blocked;
3655 }
3656 }
3657
3658	pqi_ctrl_busy(ctrl_info);
3659 timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs);
3660 if (timeout_msecs == 0) {
3661 rc = -ETIMEDOUT;
3662 goto out;
3663 }
3664
3665	io_request = pqi_alloc_io_request(ctrl_info);
3666
3667 put_unaligned_le16(io_request->index,
3668 &(((struct pqi_raid_path_request *)request)->request_id));
3669
3670 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
3671 ((struct pqi_raid_path_request *)request)->error_index =
3672 ((struct pqi_raid_path_request *)request)->request_id;
3673
3674 iu_length = get_unaligned_le16(&request->iu_length) +
3675 PQI_REQUEST_HEADER_LENGTH;
3676 memcpy(io_request->iu, request, iu_length);
3677
3678 rc = pqi_submit_raid_request_synchronous_with_io_request(ctrl_info,
3679 io_request, timeout_msecs);
3680
3681 if (error_info) {
3682 if (io_request->error_info)
3683 memcpy(error_info, io_request->error_info,
3684 sizeof(*error_info));
3685 else
3686 memset(error_info, 0, sizeof(*error_info));
3687 } else if (rc == 0 && io_request->error_info) {
3688 u8 scsi_status;
3689 struct pqi_raid_error_info *raid_error_info;
3690
3691 raid_error_info = io_request->error_info;
3692 scsi_status = raid_error_info->status;
3693
3694 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
3695 raid_error_info->data_out_result ==
3696 PQI_DATA_IN_OUT_UNDERFLOW)
3697 scsi_status = SAM_STAT_GOOD;
3698
3699 if (scsi_status != SAM_STAT_GOOD)
3700 rc = -EIO;
3701 }
3702
3703 pqi_free_io_request(io_request);
3704
3705out:
3706	pqi_ctrl_unbusy(ctrl_info);
3707	up(&ctrl_info->sync_request_sem);
3708
3709 return rc;
3710}
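/*
 * Synchronous RAID-path submission serializes callers on
 * sync_request_sem.  Time spent blocked on the semaphore, and any time
 * spent waiting out a blocked controller, is charged against the
 * caller's budget, so a timeout_msecs of 0 here means the budget was
 * exhausted before the request was ever issued.
 */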
3711
3712static int pqi_validate_admin_response(
3713 struct pqi_general_admin_response *response, u8 expected_function_code)
3714{
3715 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
3716 return -EINVAL;
3717
3718 if (get_unaligned_le16(&response->header.iu_length) !=
3719 PQI_GENERAL_ADMIN_IU_LENGTH)
3720 return -EINVAL;
3721
3722 if (response->function_code != expected_function_code)
3723 return -EINVAL;
3724
3725 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
3726 return -EINVAL;
3727
3728 return 0;
3729}
3730
3731static int pqi_submit_admin_request_synchronous(
3732 struct pqi_ctrl_info *ctrl_info,
3733 struct pqi_general_admin_request *request,
3734 struct pqi_general_admin_response *response)
3735{
3736 int rc;
3737
3738 pqi_submit_admin_request(ctrl_info, request);
3739
3740 rc = pqi_poll_for_admin_response(ctrl_info, response);
3741
3742 if (rc == 0)
3743 rc = pqi_validate_admin_response(response,
3744 request->function_code);
3745
3746 return rc;
3747}
3748
3749static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
3750{
3751 int rc;
3752 struct pqi_general_admin_request request;
3753 struct pqi_general_admin_response response;
3754 struct pqi_device_capability *capability;
3755 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
3756
3757 capability = kmalloc(sizeof(*capability), GFP_KERNEL);
3758 if (!capability)
3759 return -ENOMEM;
3760
3761 memset(&request, 0, sizeof(request));
3762
3763 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3764 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3765 &request.header.iu_length);
3766 request.function_code =
3767 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
3768 put_unaligned_le32(sizeof(*capability),
3769 &request.data.report_device_capability.buffer_length);
3770
3771 rc = pqi_map_single(ctrl_info->pci_dev,
3772 &request.data.report_device_capability.sg_descriptor,
3773 capability, sizeof(*capability),
3774 PCI_DMA_FROMDEVICE);
3775 if (rc)
3776 goto out;
3777
3778 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3779 &response);
3780
3781 pqi_pci_unmap(ctrl_info->pci_dev,
3782 &request.data.report_device_capability.sg_descriptor, 1,
3783 PCI_DMA_FROMDEVICE);
3784
3785 if (rc)
3786 goto out;
3787
3788 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
3789 rc = -EIO;
3790 goto out;
3791 }
3792
3793 ctrl_info->max_inbound_queues =
3794 get_unaligned_le16(&capability->max_inbound_queues);
3795 ctrl_info->max_elements_per_iq =
3796 get_unaligned_le16(&capability->max_elements_per_iq);
3797 ctrl_info->max_iq_element_length =
3798 get_unaligned_le16(&capability->max_iq_element_length)
3799 * 16;
3800 ctrl_info->max_outbound_queues =
3801 get_unaligned_le16(&capability->max_outbound_queues);
3802 ctrl_info->max_elements_per_oq =
3803 get_unaligned_le16(&capability->max_elements_per_oq);
3804 ctrl_info->max_oq_element_length =
3805 get_unaligned_le16(&capability->max_oq_element_length)
3806 * 16;
3807
3808 sop_iu_layer_descriptor =
3809 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
3810
3811 ctrl_info->max_inbound_iu_length_per_firmware =
3812 get_unaligned_le16(
3813 &sop_iu_layer_descriptor->max_inbound_iu_length);
3814 ctrl_info->inbound_spanning_supported =
3815 sop_iu_layer_descriptor->inbound_spanning_supported;
3816 ctrl_info->outbound_spanning_supported =
3817 sop_iu_layer_descriptor->outbound_spanning_supported;
3818
3819out:
3820 kfree(capability);
3821
3822 return rc;
3823}
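/*
 * PQI reports and accepts queue element lengths in units of 16 bytes,
 * which is why the capability values are multiplied by 16 here and the
 * element lengths are divided by 16 again when the queues are created
 * below.
 */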
3824
3825static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
3826{
3827 if (ctrl_info->max_iq_element_length <
3828 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3829 dev_err(&ctrl_info->pci_dev->dev,
3830 "max. inbound queue element length of %d is less than the required length of %d\n",
3831 ctrl_info->max_iq_element_length,
3832 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3833 return -EINVAL;
3834 }
3835
3836 if (ctrl_info->max_oq_element_length <
3837 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
3838 dev_err(&ctrl_info->pci_dev->dev,
3839 "max. outbound queue element length of %d is less than the required length of %d\n",
3840 ctrl_info->max_oq_element_length,
3841 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
3842 return -EINVAL;
3843 }
3844
3845 if (ctrl_info->max_inbound_iu_length_per_firmware <
3846 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3847 dev_err(&ctrl_info->pci_dev->dev,
3848 "max. inbound IU length of %u is less than the min. required length of %d\n",
3849 ctrl_info->max_inbound_iu_length_per_firmware,
3850 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3851 return -EINVAL;
3852 }
3853
3854	if (!ctrl_info->inbound_spanning_supported) {
3855 dev_err(&ctrl_info->pci_dev->dev,
3856 "the controller does not support inbound spanning\n");
3857 return -EINVAL;
3858 }
3859
3860 if (ctrl_info->outbound_spanning_supported) {
3861 dev_err(&ctrl_info->pci_dev->dev,
3862 "the controller supports outbound spanning but this driver does not\n");
3863 return -EINVAL;
3864 }
3865
3866	return 0;
3867}
3868
3869static int pqi_delete_operational_queue(struct pqi_ctrl_info *ctrl_info,
3870 bool inbound_queue, u16 queue_id)
3871{
3872 struct pqi_general_admin_request request;
3873 struct pqi_general_admin_response response;
3874
3875 memset(&request, 0, sizeof(request));
3876 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3877 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3878 &request.header.iu_length);
3879 if (inbound_queue)
3880 request.function_code =
3881 PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ;
3882 else
3883 request.function_code =
3884 PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ;
3885 put_unaligned_le16(queue_id,
3886 &request.data.delete_operational_queue.queue_id);
3887
3888 return pqi_submit_admin_request_synchronous(ctrl_info, &request,
3889 &response);
3890}
3891
3892static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
3893{
3894 int rc;
3895 struct pqi_event_queue *event_queue;
3896 struct pqi_general_admin_request request;
3897 struct pqi_general_admin_response response;
3898
3899 event_queue = &ctrl_info->event_queue;
3900
3901 /*
3902 * Create OQ (Outbound Queue - device to host queue) to dedicate
3903 * to events.
3904 */
3905 memset(&request, 0, sizeof(request));
3906 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3907 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3908 &request.header.iu_length);
3909 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
3910 put_unaligned_le16(event_queue->oq_id,
3911 &request.data.create_operational_oq.queue_id);
3912 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
3913 &request.data.create_operational_oq.element_array_addr);
3914 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
3915 &request.data.create_operational_oq.pi_addr);
3916 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
3917 &request.data.create_operational_oq.num_elements);
3918 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
3919 &request.data.create_operational_oq.element_length);
3920 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
3921 put_unaligned_le16(event_queue->int_msg_num,
3922 &request.data.create_operational_oq.int_msg_num);
3923
3924 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3925 &response);
3926 if (rc)
3927 return rc;
3928
3929 event_queue->oq_ci = ctrl_info->iomem_base +
3930 PQI_DEVICE_REGISTERS_OFFSET +
3931 get_unaligned_le64(
3932 &response.data.create_operational_oq.oq_ci_offset);
3933
3934 return 0;
3935}
3936
3937static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
3938	unsigned int group_number)
3939{
3940	int rc;
3941	struct pqi_queue_group *queue_group;
3942	struct pqi_general_admin_request request;
3943	struct pqi_general_admin_response response;
3944
3945	queue_group = &ctrl_info->queue_groups[group_number];
3946
3947 /*
3948 * Create IQ (Inbound Queue - host to device queue) for
3949 * RAID path.
3950 */
3951 memset(&request, 0, sizeof(request));
3952 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3953 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3954 &request.header.iu_length);
3955 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
3956 put_unaligned_le16(queue_group->iq_id[RAID_PATH],
3957 &request.data.create_operational_iq.queue_id);
3958 put_unaligned_le64(
3959 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
3960 &request.data.create_operational_iq.element_array_addr);
3961 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
3962 &request.data.create_operational_iq.ci_addr);
3963 put_unaligned_le16(ctrl_info->num_elements_per_iq,
3964 &request.data.create_operational_iq.num_elements);
3965 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
3966 &request.data.create_operational_iq.element_length);
3967 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
3968
3969 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3970 &response);
3971 if (rc) {
3972 dev_err(&ctrl_info->pci_dev->dev,
3973 "error creating inbound RAID queue\n");
3974 return rc;
3975 }
3976
3977 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
3978 PQI_DEVICE_REGISTERS_OFFSET +
3979 get_unaligned_le64(
3980 &response.data.create_operational_iq.iq_pi_offset);
3981
3982 /*
3983 * Create IQ (Inbound Queue - host to device queue) for
3984 * Advanced I/O (AIO) path.
3985 */
3986 memset(&request, 0, sizeof(request));
3987 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3988 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3989 &request.header.iu_length);
3990 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
3991 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
3992 &request.data.create_operational_iq.queue_id);
3993 put_unaligned_le64((u64)queue_group->
3994 iq_element_array_bus_addr[AIO_PATH],
3995 &request.data.create_operational_iq.element_array_addr);
3996 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
3997 &request.data.create_operational_iq.ci_addr);
3998 put_unaligned_le16(ctrl_info->num_elements_per_iq,
3999 &request.data.create_operational_iq.num_elements);
4000 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4001 &request.data.create_operational_iq.element_length);
4002 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4003
4004 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4005 &response);
4006 if (rc) {
4007 dev_err(&ctrl_info->pci_dev->dev,
4008 "error creating inbound AIO queue\n");
4009 goto delete_inbound_queue_raid;
4010 }
4011
4012 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
4013 PQI_DEVICE_REGISTERS_OFFSET +
4014 get_unaligned_le64(
4015 &response.data.create_operational_iq.iq_pi_offset);
4016
4017 /*
4018 * Designate the 2nd IQ as the AIO path. By default, all IQs are
4019 * assumed to be for RAID path I/O unless we change the queue's
4020 * property.
4021 */
4022 memset(&request, 0, sizeof(request));
4023 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4024 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4025 &request.header.iu_length);
4026 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
4027 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4028 &request.data.change_operational_iq_properties.queue_id);
4029 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
4030 &request.data.change_operational_iq_properties.vendor_specific);
4031
4032 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4033 &response);
4034 if (rc) {
4035 dev_err(&ctrl_info->pci_dev->dev,
4036 "error changing queue property\n");
4037 goto delete_inbound_queue_aio;
4038 }
4039
4040 /*
4041 * Create OQ (Outbound Queue - device to host queue).
4042 */
4043 memset(&request, 0, sizeof(request));
4044 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4045 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4046 &request.header.iu_length);
4047 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4048 put_unaligned_le16(queue_group->oq_id,
4049 &request.data.create_operational_oq.queue_id);
4050 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
4051 &request.data.create_operational_oq.element_array_addr);
4052 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
4053 &request.data.create_operational_oq.pi_addr);
4054 put_unaligned_le16(ctrl_info->num_elements_per_oq,
4055 &request.data.create_operational_oq.num_elements);
4056 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
4057 &request.data.create_operational_oq.element_length);
4058 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4059 put_unaligned_le16(queue_group->int_msg_num,
4060 &request.data.create_operational_oq.int_msg_num);
4061
4062 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4063 &response);
4064 if (rc) {
4065 dev_err(&ctrl_info->pci_dev->dev,
4066 "error creating outbound queue\n");
4067 goto delete_inbound_queue_aio;
4068 }
4069
4070 queue_group->oq_ci = ctrl_info->iomem_base +
4071 PQI_DEVICE_REGISTERS_OFFSET +
4072 get_unaligned_le64(
4073 &response.data.create_operational_oq.oq_ci_offset);
4074
4075	return 0;
4076
4077delete_inbound_queue_aio:
4078 pqi_delete_operational_queue(ctrl_info, true,
4079 queue_group->iq_id[AIO_PATH]);
4080
4081delete_inbound_queue_raid:
4082 pqi_delete_operational_queue(ctrl_info, true,
4083 queue_group->iq_id[RAID_PATH]);
4084
4085 return rc;
4086}
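/*
 * Each queue group is built in four admin steps: create the RAID-path
 * IQ, create the AIO-path IQ, change the second IQ's property so the
 * firmware treats it as an AIO queue, and finally create the shared OQ.
 * On failure, the inbound queues that were already created are deleted
 * again via the delete_inbound_queue_* labels.
 */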
4087
4088static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
4089{
4090 int rc;
4091 unsigned int i;
4092
4093 rc = pqi_create_event_queue(ctrl_info);
4094 if (rc) {
4095 dev_err(&ctrl_info->pci_dev->dev,
4096 "error creating event queue\n");
4097 return rc;
4098 }
4099
4100 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4101		rc = pqi_create_queue_group(ctrl_info, i);
4102		if (rc) {
4103 dev_err(&ctrl_info->pci_dev->dev,
4104 "error creating queue group number %u/%u\n",
4105 i, ctrl_info->num_queue_groups);
4106 return rc;
4107 }
4108 }
4109
4110 return 0;
4111}
4112
4113#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
4114 (offsetof(struct pqi_event_config, descriptors) + \
4115 (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
4116
4117static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
4118	bool enable_events)
4119{
4120	int rc;
4121	unsigned int i;
4122	struct pqi_event_config *event_config;
4123	struct pqi_event_descriptor *event_descriptor;
4124	struct pqi_general_management_request request;
4125
4126 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4127 GFP_KERNEL);
4128 if (!event_config)
4129 return -ENOMEM;
4130
4131 memset(&request, 0, sizeof(request));
4132
4133 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
4134 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4135 data.report_event_configuration.sg_descriptors[1]) -
4136 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4137 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4138 &request.data.report_event_configuration.buffer_length);
4139
4140 rc = pqi_map_single(ctrl_info->pci_dev,
4141 request.data.report_event_configuration.sg_descriptors,
4142 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4143 PCI_DMA_FROMDEVICE);
4144 if (rc)
4145 goto out;
4146
4147 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
4148 0, NULL, NO_TIMEOUT);
4149
4150 pqi_pci_unmap(ctrl_info->pci_dev,
4151 request.data.report_event_configuration.sg_descriptors, 1,
4152 PCI_DMA_FROMDEVICE);
4153
4154 if (rc)
4155 goto out;
4156
4157	for (i = 0; i < event_config->num_event_descriptors; i++) {
4158 event_descriptor = &event_config->descriptors[i];
4159 if (enable_events &&
4160 pqi_is_supported_event(event_descriptor->event_type))
4161 put_unaligned_le16(ctrl_info->event_queue.oq_id,
4162 &event_descriptor->oq_id);
4163 else
4164 put_unaligned_le16(0, &event_descriptor->oq_id);
4165 }
4166
4167 memset(&request, 0, sizeof(request));
4168
4169 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
4170 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4171 data.report_event_configuration.sg_descriptors[1]) -
4172 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4173 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4174 &request.data.report_event_configuration.buffer_length);
4175
4176 rc = pqi_map_single(ctrl_info->pci_dev,
4177 request.data.report_event_configuration.sg_descriptors,
4178 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4179 PCI_DMA_TODEVICE);
4180 if (rc)
4181 goto out;
4182
4183 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
4184 NULL, NO_TIMEOUT);
4185
4186 pqi_pci_unmap(ctrl_info->pci_dev,
4187 request.data.report_event_configuration.sg_descriptors, 1,
4188 PCI_DMA_TODEVICE);
4189
4190out:
4191 kfree(event_config);
4192
4193 return rc;
4194}
4195
4196static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
4197{
4198 return pqi_configure_events(ctrl_info, true);
4199}
4200
4201static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info)
4202{
4203 return pqi_configure_events(ctrl_info, false);
4204}
4205
4206static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
4207{
4208 unsigned int i;
4209 struct device *dev;
4210 size_t sg_chain_buffer_length;
4211 struct pqi_io_request *io_request;
4212
4213 if (!ctrl_info->io_request_pool)
4214 return;
4215
4216 dev = &ctrl_info->pci_dev->dev;
4217 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4218 io_request = ctrl_info->io_request_pool;
4219
4220 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4221 kfree(io_request->iu);
4222 if (!io_request->sg_chain_buffer)
4223 break;
4224 dma_free_coherent(dev, sg_chain_buffer_length,
4225 io_request->sg_chain_buffer,
4226 io_request->sg_chain_buffer_dma_handle);
4227 io_request++;
4228 }
4229
4230 kfree(ctrl_info->io_request_pool);
4231 ctrl_info->io_request_pool = NULL;
4232}
4233
4234static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
4235{
4236 ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
4237 ctrl_info->error_buffer_length,
4238 &ctrl_info->error_buffer_dma_handle, GFP_KERNEL);
4239
4240 if (!ctrl_info->error_buffer)
4241 return -ENOMEM;
4242
4243 return 0;
4244}
4245
4246static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
4247{
4248 unsigned int i;
4249 void *sg_chain_buffer;
4250 size_t sg_chain_buffer_length;
4251 dma_addr_t sg_chain_buffer_dma_handle;
4252 struct device *dev;
4253 struct pqi_io_request *io_request;
4254
4255 ctrl_info->io_request_pool = kzalloc(ctrl_info->max_io_slots *
4256 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
4257
4258 if (!ctrl_info->io_request_pool) {
4259 dev_err(&ctrl_info->pci_dev->dev,
4260 "failed to allocate I/O request pool\n");
4261 goto error;
4262 }
4263
4264 dev = &ctrl_info->pci_dev->dev;
4265 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4266 io_request = ctrl_info->io_request_pool;
4267
4268 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4269 io_request->iu =
4270 kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
4271
4272 if (!io_request->iu) {
4273 dev_err(&ctrl_info->pci_dev->dev,
4274 "failed to allocate IU buffers\n");
4275 goto error;
4276 }
4277
4278 sg_chain_buffer = dma_alloc_coherent(dev,
4279 sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
4280 GFP_KERNEL);
4281
4282 if (!sg_chain_buffer) {
4283 dev_err(&ctrl_info->pci_dev->dev,
4284 "failed to allocate PQI scatter-gather chain buffers\n");
4285 goto error;
4286 }
4287
4288 io_request->index = i;
4289 io_request->sg_chain_buffer = sg_chain_buffer;
4290 io_request->sg_chain_buffer_dma_handle =
4291 sg_chain_buffer_dma_handle;
4292 io_request++;
4293 }
4294
4295 return 0;
4296
4297error:
4298 pqi_free_all_io_requests(ctrl_info);
4299
4300 return -ENOMEM;
4301}
4302
4303/*
4304 * Calculate required resources that are sized based on max. outstanding
4305 * requests and max. transfer size.
4306 */
4307
4308static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
4309{
4310 u32 max_transfer_size;
4311 u32 max_sg_entries;
4312
4313 ctrl_info->scsi_ml_can_queue =
4314 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
4315 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
4316
4317 ctrl_info->error_buffer_length =
4318 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
4319
4320 max_transfer_size =
4321 min(ctrl_info->max_transfer_size, PQI_MAX_TRANSFER_SIZE);
4322
4323 max_sg_entries = max_transfer_size / PAGE_SIZE;
4324
4325 /* +1 to cover when the buffer is not page-aligned. */
4326 max_sg_entries++;
4327
4328 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
4329
4330 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
4331
4332 ctrl_info->sg_chain_buffer_length =
4333 max_sg_entries * sizeof(struct pqi_sg_descriptor);
4334 ctrl_info->sg_tablesize = max_sg_entries;
4335 ctrl_info->max_sectors = max_transfer_size / 512;
4336}
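/*
 * Worked example (assuming a 4 KiB PAGE_SIZE and a 1 MiB transfer
 * limit): 1 MiB / 4 KiB = 256 SG entries, plus one for a buffer that is
 * not page-aligned, gives 257.  After clamping to the controller's
 * max_sg_entries, the usable transfer size is recomputed as
 * (max_sg_entries - 1) * PAGE_SIZE, and max_sectors is that value in
 * 512-byte units.
 */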
4337
4338static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
4339{
4340 int num_cpus;
4341 int max_queue_groups;
4342 int num_queue_groups;
4343 u16 num_elements_per_iq;
4344 u16 num_elements_per_oq;
4345
4346 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
4347 ctrl_info->max_outbound_queues - 1);
4348 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
4349
4350 num_cpus = num_online_cpus();
4351 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
4352 num_queue_groups = min(num_queue_groups, max_queue_groups);
4353
4354 ctrl_info->num_queue_groups = num_queue_groups;
4355	ctrl_info->max_hw_queue_index = num_queue_groups - 1;
4356
4357	/*
4358 * Make sure that the max. inbound IU length is an even multiple
4359 * of our inbound element length.
4360 */
4361 ctrl_info->max_inbound_iu_length =
4362 (ctrl_info->max_inbound_iu_length_per_firmware /
4363 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
4364 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4365
4366 num_elements_per_iq =
4367 (ctrl_info->max_inbound_iu_length /
4368 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4369
4370 /* Add one because one element in each queue is unusable. */
4371 num_elements_per_iq++;
4372
4373 num_elements_per_iq = min(num_elements_per_iq,
4374 ctrl_info->max_elements_per_iq);
4375
4376 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
4377 num_elements_per_oq = min(num_elements_per_oq,
4378 ctrl_info->max_elements_per_oq);
4379
4380 ctrl_info->num_elements_per_iq = num_elements_per_iq;
4381 ctrl_info->num_elements_per_oq = num_elements_per_oq;
4382
4383 ctrl_info->max_sg_per_iu =
4384 ((ctrl_info->max_inbound_iu_length -
4385 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
4386 sizeof(struct pqi_sg_descriptor)) +
4387 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
4388}
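/*
 * The number of queue groups is bounded by the online CPU count, the
 * available MSI-X vectors and what the controller advertises.  One IQ
 * element is added back because one slot in each ring is unusable
 * (presumably so that a full ring can be told apart from an empty one),
 * and the OQ is sized at roughly twice the IQ because both the RAID and
 * AIO inbound queues of a group complete into the same outbound queue.
 */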
4389
4390static inline void pqi_set_sg_descriptor(
4391 struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
4392{
4393 u64 address = (u64)sg_dma_address(sg);
4394 unsigned int length = sg_dma_len(sg);
4395
4396 put_unaligned_le64(address, &sg_descriptor->address);
4397 put_unaligned_le32(length, &sg_descriptor->length);
4398 put_unaligned_le32(0, &sg_descriptor->flags);
4399}
4400
4401static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
4402 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
4403 struct pqi_io_request *io_request)
4404{
4405 int i;
4406 u16 iu_length;
4407 int sg_count;
4408 bool chained;
4409 unsigned int num_sg_in_iu;
4410 unsigned int max_sg_per_iu;
4411 struct scatterlist *sg;
4412 struct pqi_sg_descriptor *sg_descriptor;
4413
4414 sg_count = scsi_dma_map(scmd);
4415 if (sg_count < 0)
4416 return sg_count;
4417
4418 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
4419 PQI_REQUEST_HEADER_LENGTH;
4420
4421 if (sg_count == 0)
4422 goto out;
4423
4424 sg = scsi_sglist(scmd);
4425 sg_descriptor = request->sg_descriptors;
4426 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4427 chained = false;
4428 num_sg_in_iu = 0;
4429 i = 0;
4430
4431 while (1) {
4432 pqi_set_sg_descriptor(sg_descriptor, sg);
4433 if (!chained)
4434 num_sg_in_iu++;
4435 i++;
4436 if (i == sg_count)
4437 break;
4438 sg_descriptor++;
4439 if (i == max_sg_per_iu) {
4440 put_unaligned_le64(
4441 (u64)io_request->sg_chain_buffer_dma_handle,
4442 &sg_descriptor->address);
4443 put_unaligned_le32((sg_count - num_sg_in_iu)
4444 * sizeof(*sg_descriptor),
4445 &sg_descriptor->length);
4446 put_unaligned_le32(CISS_SG_CHAIN,
4447 &sg_descriptor->flags);
4448 chained = true;
4449 num_sg_in_iu++;
4450 sg_descriptor = io_request->sg_chain_buffer;
4451 }
4452 sg = sg_next(sg);
4453 }
4454
4455 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4456 request->partial = chained;
4457 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4458
4459out:
4460 put_unaligned_le16(iu_length, &request->header.iu_length);
4461
4462 return 0;
4463}
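/*
 * When a command needs more SG descriptors than fit in the IU, the last
 * embedded descriptor becomes a CISS_SG_CHAIN pointer to the request's
 * pre-allocated chain buffer and the remaining descriptors are written
 * there; CISS_SG_LAST marks the final descriptor.  Only the embedded
 * descriptors (including the chain pointer) count toward the IU length.
 */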
4464
4465static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
4466 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
4467 struct pqi_io_request *io_request)
4468{
4469 int i;
4470 u16 iu_length;
4471 int sg_count;
4472	bool chained;
4473	unsigned int num_sg_in_iu;
4474	unsigned int max_sg_per_iu;
4475	struct scatterlist *sg;
4476 struct pqi_sg_descriptor *sg_descriptor;
4477
4478 sg_count = scsi_dma_map(scmd);
4479 if (sg_count < 0)
4480 return sg_count;
4481
4482 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
4483 PQI_REQUEST_HEADER_LENGTH;
4484 num_sg_in_iu = 0;
4485
4486	if (sg_count == 0)
4487 goto out;
4488
4489	sg = scsi_sglist(scmd);
4490 sg_descriptor = request->sg_descriptors;
4491 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4492 chained = false;
4493 i = 0;
4494
4495	while (1) {
4496 pqi_set_sg_descriptor(sg_descriptor, sg);
4497 if (!chained)
4498 num_sg_in_iu++;
4499 i++;
4500 if (i == sg_count)
4501 break;
4502 sg_descriptor++;
4503 if (i == max_sg_per_iu) {
4504 put_unaligned_le64(
4505 (u64)io_request->sg_chain_buffer_dma_handle,
4506 &sg_descriptor->address);
4507 put_unaligned_le32((sg_count - num_sg_in_iu)
4508 * sizeof(*sg_descriptor),
4509 &sg_descriptor->length);
4510 put_unaligned_le32(CISS_SG_CHAIN,
4511 &sg_descriptor->flags);
4512 chained = true;
4513 num_sg_in_iu++;
4514 sg_descriptor = io_request->sg_chain_buffer;
4515		}
4516		sg = sg_next(sg);
4517	}
4518
4519	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4520	request->partial = chained;
4521	iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4522
4523out:
4524	put_unaligned_le16(iu_length, &request->header.iu_length);
4525 request->num_sg_descriptors = num_sg_in_iu;
4526
4527 return 0;
4528}
4529
4530static void pqi_raid_io_complete(struct pqi_io_request *io_request,
4531 void *context)
4532{
4533 struct scsi_cmnd *scmd;
4534
4535 scmd = io_request->scmd;
4536 pqi_free_io_request(io_request);
4537 scsi_dma_unmap(scmd);
4538 pqi_scsi_done(scmd);
4539}
4540
4541static int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4542 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4543 struct pqi_queue_group *queue_group)
4544{
4545 int rc;
4546 size_t cdb_length;
4547 struct pqi_io_request *io_request;
4548 struct pqi_raid_path_request *request;
4549
4550 io_request = pqi_alloc_io_request(ctrl_info);
4551 io_request->io_complete_callback = pqi_raid_io_complete;
4552 io_request->scmd = scmd;
4553
4554 scmd->host_scribble = (unsigned char *)io_request;
4555
4556 request = io_request->iu;
4557 memset(request, 0,
4558 offsetof(struct pqi_raid_path_request, sg_descriptors));
4559
4560 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
4561 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4562 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4563 put_unaligned_le16(io_request->index, &request->request_id);
4564 request->error_index = request->request_id;
4565 memcpy(request->lun_number, device->scsi3addr,
4566 sizeof(request->lun_number));
4567
4568 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
4569 memcpy(request->cdb, scmd->cmnd, cdb_length);
4570
4571 switch (cdb_length) {
4572 case 6:
4573 case 10:
4574 case 12:
4575 case 16:
4576 /* No bytes in the Additional CDB bytes field */
4577 request->additional_cdb_bytes_usage =
4578 SOP_ADDITIONAL_CDB_BYTES_0;
4579 break;
4580 case 20:
4581 /* 4 bytes in the Additional cdb field */
4582 request->additional_cdb_bytes_usage =
4583 SOP_ADDITIONAL_CDB_BYTES_4;
4584 break;
4585 case 24:
4586 /* 8 bytes in the Additional cdb field */
4587 request->additional_cdb_bytes_usage =
4588 SOP_ADDITIONAL_CDB_BYTES_8;
4589 break;
4590 case 28:
4591 /* 12 bytes in the Additional cdb field */
4592 request->additional_cdb_bytes_usage =
4593 SOP_ADDITIONAL_CDB_BYTES_12;
4594 break;
4595 case 32:
4596 default:
4597 /* 16 bytes in the Additional cdb field */
4598 request->additional_cdb_bytes_usage =
4599 SOP_ADDITIONAL_CDB_BYTES_16;
4600 break;
4601 }
4602
4603 switch (scmd->sc_data_direction) {
4604 case DMA_TO_DEVICE:
4605 request->data_direction = SOP_READ_FLAG;
4606 break;
4607 case DMA_FROM_DEVICE:
4608 request->data_direction = SOP_WRITE_FLAG;
4609 break;
4610 case DMA_NONE:
4611 request->data_direction = SOP_NO_DIRECTION_FLAG;
4612 break;
4613 case DMA_BIDIRECTIONAL:
4614 request->data_direction = SOP_BIDIRECTIONAL;
4615 break;
4616 default:
4617 dev_err(&ctrl_info->pci_dev->dev,
4618 "unknown data direction: %d\n",
4619 scmd->sc_data_direction);
4620 WARN_ON(scmd->sc_data_direction);
4621 break;
4622 }
4623
4624 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
4625 if (rc) {
4626 pqi_free_io_request(io_request);
4627 return SCSI_MLQUEUE_HOST_BUSY;
4628 }
4629
4630 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
4631
4632 return 0;
4633}
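/*
 * The SOP data-direction flags are expressed from the controller's
 * point of view, which is why DMA_TO_DEVICE (the host writes data out)
 * maps to SOP_READ_FLAG and DMA_FROM_DEVICE maps to SOP_WRITE_FLAG.
 * CDBs longer than 16 bytes advertise the extra length through the
 * additional_cdb_bytes_usage field.
 */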
4634
4635static void pqi_aio_io_complete(struct pqi_io_request *io_request,
4636 void *context)
4637{
4638 struct scsi_cmnd *scmd;
4639
4640 scmd = io_request->scmd;
4641 scsi_dma_unmap(scmd);
4642 if (io_request->status == -EAGAIN)
4643 set_host_byte(scmd, DID_IMM_RETRY);
4644 pqi_free_io_request(io_request);
4645 pqi_scsi_done(scmd);
4646}
4647
4648static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4649 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4650 struct pqi_queue_group *queue_group)
4651{
4652 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
4653 scmd->cmnd, scmd->cmd_len, queue_group, NULL);
4654}
4655
4656static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
4657 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
4658 unsigned int cdb_length, struct pqi_queue_group *queue_group,
4659 struct pqi_encryption_info *encryption_info)
4660{
4661 int rc;
4662 struct pqi_io_request *io_request;
4663 struct pqi_aio_path_request *request;
4664
4665 io_request = pqi_alloc_io_request(ctrl_info);
4666 io_request->io_complete_callback = pqi_aio_io_complete;
4667 io_request->scmd = scmd;
4668
4669 scmd->host_scribble = (unsigned char *)io_request;
4670
4671 request = io_request->iu;
4672 memset(request, 0,
4673 offsetof(struct pqi_raid_path_request, sg_descriptors));
4674
4675 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
4676 put_unaligned_le32(aio_handle, &request->nexus_id);
4677 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4678 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4679 put_unaligned_le16(io_request->index, &request->request_id);
4680 request->error_index = request->request_id;
4681 if (cdb_length > sizeof(request->cdb))
4682 cdb_length = sizeof(request->cdb);
4683 request->cdb_length = cdb_length;
4684 memcpy(request->cdb, cdb, cdb_length);
4685
4686 switch (scmd->sc_data_direction) {
4687 case DMA_TO_DEVICE:
4688 request->data_direction = SOP_READ_FLAG;
4689 break;
4690 case DMA_FROM_DEVICE:
4691 request->data_direction = SOP_WRITE_FLAG;
4692 break;
4693 case DMA_NONE:
4694 request->data_direction = SOP_NO_DIRECTION_FLAG;
4695 break;
4696 case DMA_BIDIRECTIONAL:
4697 request->data_direction = SOP_BIDIRECTIONAL;
4698 break;
4699 default:
4700 dev_err(&ctrl_info->pci_dev->dev,
4701 "unknown data direction: %d\n",
4702 scmd->sc_data_direction);
4703 WARN_ON(scmd->sc_data_direction);
4704 break;
4705 }
4706
4707 if (encryption_info) {
4708 request->encryption_enable = true;
4709 put_unaligned_le16(encryption_info->data_encryption_key_index,
4710 &request->data_encryption_key_index);
4711 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
4712 &request->encrypt_tweak_lower);
4713 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
4714 &request->encrypt_tweak_upper);
4715 }
4716
4717 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
4718 if (rc) {
4719 pqi_free_io_request(io_request);
4720 return SCSI_MLQUEUE_HOST_BUSY;
4721 }
4722
4723 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
4724
4725 return 0;
4726}
4727
4728static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
4729 struct scsi_cmnd *scmd)
4730{
4731 u16 hw_queue;
4732
4733 hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
4734 if (hw_queue > ctrl_info->max_hw_queue_index)
4735 hw_queue = 0;
4736
4737 return hw_queue;
4738}
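/*
 * The hardware queue index comes from the block layer: blk-mq encodes
 * it in the request's unique tag, and pqi_map_queues() below ties those
 * hardware contexts to the controller's PCI (MSI-X) vectors.  An
 * out-of-range index falls back to queue group 0.
 */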
4739
4740/*
4741 * This function gets called just before we hand the completed SCSI request
4742 * back to the SML.
4743 */
4744
4745void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
4746{
4747 struct pqi_scsi_dev *device;
4748
4749 device = scmd->device->hostdata;
4750 atomic_dec(&device->scsi_cmds_outstanding);
4751}
4752
4753static int pqi_scsi_queue_command(struct Scsi_Host *shost,
4754	struct scsi_cmnd *scmd)
4755{
4756	int rc;
4757	struct pqi_ctrl_info *ctrl_info;
4758	struct pqi_scsi_dev *device;
4759	u16 hw_queue;
4760	struct pqi_queue_group *queue_group;
4761 bool raid_bypassed;
4762
4763 device = scmd->device->hostdata;
Kevin Barnett6c223762016-06-27 16:41:00 -05004764 ctrl_info = shost_to_hba(shost);
4765
Kevin Barnett7561a7e2017-05-03 18:52:58 -05004766 atomic_inc(&device->scsi_cmds_outstanding);
4767
Kevin Barnett6c223762016-06-27 16:41:00 -05004768 if (pqi_ctrl_offline(ctrl_info)) {
4769 set_host_byte(scmd, DID_NO_CONNECT);
4770 pqi_scsi_done(scmd);
4771 return 0;
4772 }
4773
4774	pqi_ctrl_busy(ctrl_info);
4775 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device)) {
4776 rc = SCSI_MLQUEUE_HOST_BUSY;
4777 goto out;
4778 }
4779
4780	/*
4781 * This is necessary because the SML doesn't zero out this field during
4782 * error recovery.
4783 */
4784 scmd->result = 0;
4785
4786	hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
4787	queue_group = &ctrl_info->queue_groups[hw_queue];
4788
4789 if (pqi_is_logical_device(device)) {
4790 raid_bypassed = false;
4791 if (device->offload_enabled &&
4792		    !blk_rq_is_passthrough(scmd->request)) {
4793			rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
4794 scmd, queue_group);
4795 if (rc == 0 ||
4796 rc == SCSI_MLQUEUE_HOST_BUSY ||
4797 rc == SAM_STAT_CHECK_CONDITION ||
4798 rc == SAM_STAT_RESERVATION_CONFLICT)
4799 raid_bypassed = true;
4800 }
4801 if (!raid_bypassed)
4802 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
4803 queue_group);
4804 } else {
4805 if (device->aio_enabled)
4806 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
4807 queue_group);
4808 else
4809 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
4810 queue_group);
4811 }
4812
4813out:
4814	pqi_ctrl_unbusy(ctrl_info);
4815	if (rc)
4816		atomic_dec(&device->scsi_cmds_outstanding);
4817
4818	return rc;
4819}
4820
4821static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info,
4822 struct pqi_queue_group *queue_group)
4823{
4824 unsigned int path;
4825 unsigned long flags;
4826 bool list_is_empty;
4827
4828 for (path = 0; path < 2; path++) {
4829 while (1) {
4830 spin_lock_irqsave(
4831 &queue_group->submit_lock[path], flags);
4832 list_is_empty =
4833 list_empty(&queue_group->request_list[path]);
4834 spin_unlock_irqrestore(
4835 &queue_group->submit_lock[path], flags);
4836 if (list_is_empty)
4837 break;
4838 pqi_check_ctrl_health(ctrl_info);
4839 if (pqi_ctrl_offline(ctrl_info))
4840 return -ENXIO;
4841 usleep_range(1000, 2000);
4842 }
4843 }
4844
4845 return 0;
4846}
4847
4848static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
4849{
4850 int rc;
4851 unsigned int i;
4852 unsigned int path;
4853 struct pqi_queue_group *queue_group;
4854 pqi_index_t iq_pi;
4855 pqi_index_t iq_ci;
4856
4857 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4858 queue_group = &ctrl_info->queue_groups[i];
4859
4860 rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group);
4861 if (rc)
4862 return rc;
4863
4864 for (path = 0; path < 2; path++) {
4865 iq_pi = queue_group->iq_pi_copy[path];
4866
4867 while (1) {
4868 iq_ci = *queue_group->iq_ci[path];
4869 if (iq_ci == iq_pi)
4870 break;
4871 pqi_check_ctrl_health(ctrl_info);
4872 if (pqi_ctrl_offline(ctrl_info))
4873 return -ENXIO;
4874 usleep_range(1000, 2000);
4875 }
4876 }
4877 }
4878
4879 return 0;
4880}
4881
4882static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
4883 struct pqi_scsi_dev *device)
4884{
4885 unsigned int i;
4886 unsigned int path;
4887 struct pqi_queue_group *queue_group;
4888 unsigned long flags;
4889 struct pqi_io_request *io_request;
4890 struct pqi_io_request *next;
4891 struct scsi_cmnd *scmd;
4892 struct pqi_scsi_dev *scsi_device;
4893
4894 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4895 queue_group = &ctrl_info->queue_groups[i];
4896
4897 for (path = 0; path < 2; path++) {
4898 spin_lock_irqsave(
4899 &queue_group->submit_lock[path], flags);
4900
4901 list_for_each_entry_safe(io_request, next,
4902 &queue_group->request_list[path],
4903 request_list_entry) {
4904 scmd = io_request->scmd;
4905 if (!scmd)
4906 continue;
4907
4908 scsi_device = scmd->device->hostdata;
4909 if (scsi_device != device)
4910 continue;
4911
4912 list_del(&io_request->request_list_entry);
4913 set_host_byte(scmd, DID_RESET);
4914 pqi_scsi_done(scmd);
4915 }
4916
4917 spin_unlock_irqrestore(
4918 &queue_group->submit_lock[path], flags);
4919 }
4920 }
4921}
4922
4923static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
4924 struct pqi_scsi_dev *device)
4925{
4926 while (atomic_read(&device->scsi_cmds_outstanding)) {
4927 pqi_check_ctrl_health(ctrl_info);
4928 if (pqi_ctrl_offline(ctrl_info))
4929 return -ENXIO;
4930 usleep_range(1000, 2000);
4931 }
4932
4933 return 0;
4934}
4935
4936static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info)
4937{
4938 bool io_pending;
4939 unsigned long flags;
4940 struct pqi_scsi_dev *device;
4941
4942 while (1) {
4943 io_pending = false;
4944
4945 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
4946 list_for_each_entry(device, &ctrl_info->scsi_device_list,
4947 scsi_device_list_entry) {
4948 if (atomic_read(&device->scsi_cmds_outstanding)) {
4949 io_pending = true;
4950 break;
4951 }
4952 }
4953 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
4954 flags);
4955
4956 if (!io_pending)
4957 break;
4958
4959 pqi_check_ctrl_health(ctrl_info);
4960 if (pqi_ctrl_offline(ctrl_info))
4961 return -ENXIO;
4962
4963 usleep_range(1000, 2000);
4964 }
4965
4966 return 0;
4967}
4968
4969static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
4970	void *context)
4971{
4972 struct completion *waiting = context;
4973
4974 complete(waiting);
4975}
4976
4977#define PQI_LUN_RESET_TIMEOUT_SECS	10
4978
4979static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
4980 struct pqi_scsi_dev *device, struct completion *wait)
4981{
4982 int rc;
4983
4984 while (1) {
4985 if (wait_for_completion_io_timeout(wait,
4986 PQI_LUN_RESET_TIMEOUT_SECS * HZ)) {
4987 rc = 0;
4988 break;
4989 }
4990
4991 pqi_check_ctrl_health(ctrl_info);
4992 if (pqi_ctrl_offline(ctrl_info)) {
4993 rc = -ETIMEDOUT;
4994 break;
4995 }
4996	}
4997
4998 return rc;
4999}
5000
5001static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
5002	struct pqi_scsi_dev *device)
5003{
5004 int rc;
5005 struct pqi_io_request *io_request;
5006 DECLARE_COMPLETION_ONSTACK(wait);
5007 struct pqi_task_management_request *request;
5008
5009	io_request = pqi_alloc_io_request(ctrl_info);
5010	io_request->io_complete_callback = pqi_lun_reset_complete;
5011	io_request->context = &wait;
5012
5013 request = io_request->iu;
5014 memset(request, 0, sizeof(*request));
5015
5016 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
5017 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
5018 &request->header.iu_length);
5019 put_unaligned_le16(io_request->index, &request->request_id);
5020 memcpy(request->lun_number, device->scsi3addr,
5021 sizeof(request->lun_number));
5022 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
5023
5024 pqi_start_io(ctrl_info,
5025 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
5026 io_request);
5027
5028	rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
5029	if (rc == 0)
5030		rc = io_request->status;
5031
5032	pqi_free_io_request(io_request);
5033
5034 return rc;
5035}
5036
5037/* Performs a reset at the LUN level. */
5038
5039static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
5040 struct pqi_scsi_dev *device)
5041{
5042 int rc;
5043
5044	rc = pqi_lun_reset(ctrl_info, device);
5045	if (rc == 0)
5046		rc = pqi_device_wait_for_pending_io(ctrl_info, device);
5047
5048	return rc == 0 ? SUCCESS : FAILED;
5049}
5050
5051static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
5052{
5053 int rc;
5054	struct Scsi_Host *shost;
5055	struct pqi_ctrl_info *ctrl_info;
5056 struct pqi_scsi_dev *device;
5057
5058	shost = scmd->device->host;
5059	ctrl_info = shost_to_hba(shost);
5060	device = scmd->device->hostdata;
5061
5062 dev_err(&ctrl_info->pci_dev->dev,
5063 "resetting scsi %d:%d:%d:%d\n",
5064		shost->host_no, device->bus, device->target, device->lun);
5065
5066	pqi_check_ctrl_health(ctrl_info);
5067 if (pqi_ctrl_offline(ctrl_info)) {
5068 rc = FAILED;
5069 goto out;
5070 }
5071
5072	mutex_lock(&ctrl_info->lun_reset_mutex);
5073
5074 pqi_ctrl_block_requests(ctrl_info);
5075 pqi_ctrl_wait_until_quiesced(ctrl_info);
5076 pqi_fail_io_queued_for_device(ctrl_info, device);
5077 rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
5078 pqi_device_reset_start(device);
5079 pqi_ctrl_unblock_requests(ctrl_info);
5080
5081 if (rc)
5082 rc = FAILED;
5083 else
5084 rc = pqi_device_reset(ctrl_info, device);
5085
5086 pqi_device_reset_done(device);
5087
5088 mutex_unlock(&ctrl_info->lun_reset_mutex);
5089
5090out:
5091	dev_err(&ctrl_info->pci_dev->dev,
5092		"reset of scsi %d:%d:%d:%d: %s\n",
5093		shost->host_no, device->bus, device->target, device->lun,
5094		rc == SUCCESS ? "SUCCESS" : "FAILED");
5095
5096 return rc;
5097}
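/*
 * The error-handler path serializes resets with lun_reset_mutex and
 * follows a fixed sequence: block new requests, wait for the controller
 * to quiesce, fail any I/O still queued in the driver for this device,
 * drain the inbound queues, then issue the LUN reset and wait for the
 * device's outstanding commands to drop to zero.
 */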
5098
5099static int pqi_slave_alloc(struct scsi_device *sdev)
5100{
5101 struct pqi_scsi_dev *device;
5102 unsigned long flags;
5103 struct pqi_ctrl_info *ctrl_info;
5104 struct scsi_target *starget;
5105 struct sas_rphy *rphy;
5106
5107 ctrl_info = shost_to_hba(sdev->host);
5108
5109 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5110
5111 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
5112 starget = scsi_target(sdev);
5113 rphy = target_to_rphy(starget);
5114 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
5115 if (device) {
5116 device->target = sdev_id(sdev);
5117 device->lun = sdev->lun;
5118 device->target_lun_valid = true;
5119 }
5120 } else {
5121 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
5122 sdev_id(sdev), sdev->lun);
5123 }
5124
5125 if (device && device->expose_device) {
5126 sdev->hostdata = device;
5127 device->sdev = sdev;
5128 if (device->queue_depth) {
5129 device->advertised_queue_depth = device->queue_depth;
5130 scsi_change_queue_depth(sdev,
5131 device->advertised_queue_depth);
5132 }
5133 }
5134
5135 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5136
5137 return 0;
5138}
5139
5140static int pqi_slave_configure(struct scsi_device *sdev)
5141{
5142 struct pqi_scsi_dev *device;
5143
5144 device = sdev->hostdata;
5145 if (!device->expose_device)
5146 sdev->no_uld_attach = true;
5147
5148 return 0;
5149}
5150
5151static int pqi_map_queues(struct Scsi_Host *shost)
5152{
5153 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
5154
5155 return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev);
5156}
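/*
 * blk_mq_pci_map_queues() assigns each blk-mq hardware context to the
 * CPUs serviced by the matching PCI (MSI-X) vector, so submissions and
 * completions for a queue group tend to stay on the same CPUs.
 */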
5157
5158static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
5159 void __user *arg)
5160{
5161 struct pci_dev *pci_dev;
5162 u32 subsystem_vendor;
5163 u32 subsystem_device;
5164 cciss_pci_info_struct pciinfo;
5165
5166 if (!arg)
5167 return -EINVAL;
5168
5169 pci_dev = ctrl_info->pci_dev;
5170
5171 pciinfo.domain = pci_domain_nr(pci_dev->bus);
5172 pciinfo.bus = pci_dev->bus->number;
5173 pciinfo.dev_fn = pci_dev->devfn;
5174 subsystem_vendor = pci_dev->subsystem_vendor;
5175 subsystem_device = pci_dev->subsystem_device;
5176 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
5177 subsystem_vendor;
5178
5179 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
5180 return -EFAULT;
5181
5182 return 0;
5183}
5184
5185static int pqi_getdrivver_ioctl(void __user *arg)
5186{
5187 u32 version;
5188
5189 if (!arg)
5190 return -EINVAL;
5191
5192 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
5193 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
5194
5195 if (copy_to_user(arg, &version, sizeof(version)))
5196 return -EFAULT;
5197
5198 return 0;
5199}
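/*
 * The driver version is packed into one 32-bit value: major in bits
 * 31-28, minor in bits 27-24, release in bits 23-16 and revision in the
 * low 16 bits.  For a hypothetical version 1.2.3-45 this would be
 * (1 << 28) | (2 << 24) | (3 << 16) | 45 = 0x1203002D.
 */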
5200
5201struct ciss_error_info {
5202 u8 scsi_status;
5203 int command_status;
5204 size_t sense_data_length;
5205};
5206
5207static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
5208 struct ciss_error_info *ciss_error_info)
5209{
5210 int ciss_cmd_status;
5211 size_t sense_data_length;
5212
5213 switch (pqi_error_info->data_out_result) {
5214 case PQI_DATA_IN_OUT_GOOD:
5215 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
5216 break;
5217 case PQI_DATA_IN_OUT_UNDERFLOW:
5218 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
5219 break;
5220 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
5221 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
5222 break;
5223 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
5224 case PQI_DATA_IN_OUT_BUFFER_ERROR:
5225 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
5226 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
5227 case PQI_DATA_IN_OUT_ERROR:
5228 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
5229 break;
5230 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
5231 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
5232 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
5233 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
5234 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
5235 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
5236 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
5237 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
5238 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
5239 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
5240 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
5241 break;
5242 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
5243 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
5244 break;
5245 case PQI_DATA_IN_OUT_ABORTED:
5246 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
5247 break;
5248 case PQI_DATA_IN_OUT_TIMEOUT:
5249 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
5250 break;
5251 default:
5252 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
5253 break;
5254 }
5255
5256 sense_data_length =
5257 get_unaligned_le16(&pqi_error_info->sense_data_length);
5258 if (sense_data_length == 0)
5259 sense_data_length =
5260 get_unaligned_le16(&pqi_error_info->response_data_length);
5261 if (sense_data_length)
5262 if (sense_data_length > sizeof(pqi_error_info->data))
5263 sense_data_length = sizeof(pqi_error_info->data);
5264
5265 ciss_error_info->scsi_status = pqi_error_info->status;
5266 ciss_error_info->command_status = ciss_cmd_status;
5267 ciss_error_info->sense_data_length = sense_data_length;
5268}
5269
5270static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
5271{
5272 int rc;
5273 char *kernel_buffer = NULL;
5274 u16 iu_length;
5275 size_t sense_data_length;
5276 IOCTL_Command_struct iocommand;
5277 struct pqi_raid_path_request request;
5278 struct pqi_raid_error_info pqi_error_info;
5279 struct ciss_error_info ciss_error_info;
5280
5281 if (pqi_ctrl_offline(ctrl_info))
5282 return -ENXIO;
5283 if (!arg)
5284 return -EINVAL;
5285 if (!capable(CAP_SYS_RAWIO))
5286 return -EPERM;
5287 if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
5288 return -EFAULT;
5289 if (iocommand.buf_size < 1 &&
5290 iocommand.Request.Type.Direction != XFER_NONE)
5291 return -EINVAL;
5292 if (iocommand.Request.CDBLen > sizeof(request.cdb))
5293 return -EINVAL;
5294 if (iocommand.Request.Type.Type != TYPE_CMD)
5295 return -EINVAL;
5296
5297 switch (iocommand.Request.Type.Direction) {
5298 case XFER_NONE:
5299 case XFER_WRITE:
5300 case XFER_READ:
5301 break;
5302 default:
5303 return -EINVAL;
5304 }
5305
5306 if (iocommand.buf_size > 0) {
5307 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
5308 if (!kernel_buffer)
5309 return -ENOMEM;
5310 if (iocommand.Request.Type.Direction & XFER_WRITE) {
5311 if (copy_from_user(kernel_buffer, iocommand.buf,
5312 iocommand.buf_size)) {
5313 rc = -EFAULT;
5314 goto out;
5315 }
5316 } else {
5317 memset(kernel_buffer, 0, iocommand.buf_size);
5318 }
5319 }
5320
5321 memset(&request, 0, sizeof(request));
5322
5323 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
5324 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
5325 PQI_REQUEST_HEADER_LENGTH;
5326 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
5327 sizeof(request.lun_number));
5328 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
5329 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
5330
5331 switch (iocommand.Request.Type.Direction) {
5332 case XFER_NONE:
5333 request.data_direction = SOP_NO_DIRECTION_FLAG;
5334 break;
5335 case XFER_WRITE:
5336 request.data_direction = SOP_WRITE_FLAG;
5337 break;
5338 case XFER_READ:
5339 request.data_direction = SOP_READ_FLAG;
5340 break;
5341 }
5342
5343 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5344
5345 if (iocommand.buf_size > 0) {
5346 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
5347
5348 rc = pqi_map_single(ctrl_info->pci_dev,
5349 &request.sg_descriptors[0], kernel_buffer,
5350 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
5351 if (rc)
5352 goto out;
5353
5354 iu_length += sizeof(request.sg_descriptors[0]);
5355 }
5356
5357 put_unaligned_le16(iu_length, &request.header.iu_length);
5358
5359 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
5360 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
5361
5362 if (iocommand.buf_size > 0)
5363 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
5364 PCI_DMA_BIDIRECTIONAL);
5365
5366 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
5367
5368 if (rc == 0) {
5369 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
5370 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
5371 iocommand.error_info.CommandStatus =
5372 ciss_error_info.command_status;
5373 sense_data_length = ciss_error_info.sense_data_length;
5374 if (sense_data_length) {
5375 if (sense_data_length >
5376 sizeof(iocommand.error_info.SenseInfo))
5377 sense_data_length =
5378 sizeof(iocommand.error_info.SenseInfo);
5379 memcpy(iocommand.error_info.SenseInfo,
5380 pqi_error_info.data, sense_data_length);
5381 iocommand.error_info.SenseLen = sense_data_length;
5382 }
5383 }
5384
5385 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
5386 rc = -EFAULT;
5387 goto out;
5388 }
5389
5390 if (rc == 0 && iocommand.buf_size > 0 &&
5391 (iocommand.Request.Type.Direction & XFER_READ)) {
5392 if (copy_to_user(iocommand.buf, kernel_buffer,
5393 iocommand.buf_size)) {
5394 rc = -EFAULT;
5395 }
5396 }
5397
5398out:
5399 kfree(kernel_buffer);
5400
5401 return rc;
5402}
5403
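/*
 * Entry point for the legacy CCISS ioctls. The "register new disk" variants
 * simply trigger a rescan; the remaining commands are forwarded to their
 * dedicated handlers.
 */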
5404static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
5405{
5406 int rc;
5407 struct pqi_ctrl_info *ctrl_info;
5408
5409 ctrl_info = shost_to_hba(sdev->host);
5410
5411 switch (cmd) {
5412 case CCISS_DEREGDISK:
5413 case CCISS_REGNEWDISK:
5414 case CCISS_REGNEWD:
5415 rc = pqi_scan_scsi_devices(ctrl_info);
5416 break;
5417 case CCISS_GETPCIINFO:
5418 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
5419 break;
5420 case CCISS_GETDRIVVER:
5421 rc = pqi_getdrivver_ioctl(arg);
5422 break;
5423 case CCISS_PASSTHRU:
5424 rc = pqi_passthru_ioctl(ctrl_info, arg);
5425 break;
5426 default:
5427 rc = -EINVAL;
5428 break;
5429 }
5430
5431 return rc;
5432}
5433
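/*
 * Host-level sysfs attributes (e.g. /sys/class/scsi_host/hostN/version and
 * .../rescan): "version" reports the driver and controller firmware
 * versions, and writing anything to "rescan" starts a device scan.
 */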
5434static ssize_t pqi_version_show(struct device *dev,
5435 struct device_attribute *attr, char *buffer)
5436{
5437 ssize_t count = 0;
5438 struct Scsi_Host *shost;
5439 struct pqi_ctrl_info *ctrl_info;
5440
5441 shost = class_to_shost(dev);
5442 ctrl_info = shost_to_hba(shost);
5443
5444 count += snprintf(buffer + count, PAGE_SIZE - count,
5445 " driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP);
5446
5447 count += snprintf(buffer + count, PAGE_SIZE - count,
5448 "firmware: %s\n", ctrl_info->firmware_version);
5449
5450 return count;
5451}
5452
5453static ssize_t pqi_host_rescan_store(struct device *dev,
5454 struct device_attribute *attr, const char *buffer, size_t count)
5455{
5456 struct Scsi_Host *shost = class_to_shost(dev);
5457
5458 pqi_scan_start(shost);
5459
5460 return count;
5461}
5462
5463static DEVICE_ATTR(version, S_IRUGO, pqi_version_show, NULL);
5464static DEVICE_ATTR(rescan, S_IWUSR, NULL, pqi_host_rescan_store);
5465
5466static struct device_attribute *pqi_shost_attrs[] = {
5467 &dev_attr_version,
5468 &dev_attr_rescan,
5469 NULL
5470};
5471
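/*
 * Per-device sysfs attributes: "sas_address" is valid only for physical
 * devices (logical volumes return -ENODEV), and "ssd_smart_path_enabled"
 * reports whether I/O offload (the AIO path) is currently enabled for the
 * device.
 */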
5472static ssize_t pqi_sas_address_show(struct device *dev,
5473 struct device_attribute *attr, char *buffer)
5474{
5475 struct pqi_ctrl_info *ctrl_info;
5476 struct scsi_device *sdev;
5477 struct pqi_scsi_dev *device;
5478 unsigned long flags;
5479 u64 sas_address;
5480
5481 sdev = to_scsi_device(dev);
5482 ctrl_info = shost_to_hba(sdev->host);
5483
5484 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5485
5486 device = sdev->hostdata;
5487 if (pqi_is_logical_device(device)) {
5488 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5489 flags);
5490 return -ENODEV;
5491 }
5492 sas_address = device->sas_address;
5493
5494 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5495
5496 return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
5497}
5498
5499static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
5500 struct device_attribute *attr, char *buffer)
5501{
5502 struct pqi_ctrl_info *ctrl_info;
5503 struct scsi_device *sdev;
5504 struct pqi_scsi_dev *device;
5505 unsigned long flags;
5506
5507 sdev = to_scsi_device(dev);
5508 ctrl_info = shost_to_hba(sdev->host);
5509
5510 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5511
5512 device = sdev->hostdata;
5513 buffer[0] = device->offload_enabled ? '1' : '0';
5514 buffer[1] = '\n';
5515 buffer[2] = '\0';
5516
5517 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5518
5519 return 2;
5520}
5521
5522static DEVICE_ATTR(sas_address, S_IRUGO, pqi_sas_address_show, NULL);
5523static DEVICE_ATTR(ssd_smart_path_enabled, S_IRUGO,
5524 pqi_ssd_smart_path_enabled_show, NULL);
5525
5526static struct device_attribute *pqi_sdev_attrs[] = {
5527 &dev_attr_sas_address,
5528 &dev_attr_ssd_smart_path_enabled,
5529 NULL
5530};
5531
5532static struct scsi_host_template pqi_driver_template = {
5533 .module = THIS_MODULE,
5534 .name = DRIVER_NAME_SHORT,
5535 .proc_name = DRIVER_NAME_SHORT,
5536 .queuecommand = pqi_scsi_queue_command,
5537 .scan_start = pqi_scan_start,
5538 .scan_finished = pqi_scan_finished,
5539 .this_id = -1,
5540 .use_clustering = ENABLE_CLUSTERING,
5541 .eh_device_reset_handler = pqi_eh_device_reset_handler,
5542 .ioctl = pqi_ioctl,
5543 .slave_alloc = pqi_slave_alloc,
5544 .slave_configure = pqi_slave_configure,
Christoph Hellwig52198222016-11-01 08:12:49 -06005545 .map_queues = pqi_map_queues,
Kevin Barnett6c223762016-06-27 16:41:00 -05005546 .sdev_attrs = pqi_sdev_attrs,
5547 .shost_attrs = pqi_shost_attrs,
5548};
5549
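/*
 * Allocate and register the SCSI host: size the queue depth, SG table, and
 * maximum transfer length from the controller's reported capabilities, then
 * attach the host to the SCSI midlayer and to the SAS transport layer.
 */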
5550static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
5551{
5552 int rc;
5553 struct Scsi_Host *shost;
5554
5555 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
5556 if (!shost) {
5557 dev_err(&ctrl_info->pci_dev->dev,
5558 "scsi_host_alloc failed for controller %u\n",
5559 ctrl_info->ctrl_id);
5560 return -ENOMEM;
5561 }
5562
5563 shost->io_port = 0;
5564 shost->n_io_port = 0;
5565 shost->this_id = -1;
5566 shost->max_channel = PQI_MAX_BUS;
5567 shost->max_cmd_len = MAX_COMMAND_SIZE;
5568 shost->max_lun = ~0;
5569 shost->max_id = ~0;
5570 shost->max_sectors = ctrl_info->max_sectors;
5571 shost->can_queue = ctrl_info->scsi_ml_can_queue;
5572 shost->cmd_per_lun = shost->can_queue;
5573 shost->sg_tablesize = ctrl_info->sg_tablesize;
5574 shost->transportt = pqi_sas_transport_template;
Christoph Hellwig52198222016-11-01 08:12:49 -06005575 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
Kevin Barnett6c223762016-06-27 16:41:00 -05005576 shost->unique_id = shost->irq;
5577 shost->nr_hw_queues = ctrl_info->num_queue_groups;
5578 shost->hostdata[0] = (unsigned long)ctrl_info;
5579
5580 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
5581 if (rc) {
5582 dev_err(&ctrl_info->pci_dev->dev,
5583 "scsi_add_host failed for controller %u\n",
5584 ctrl_info->ctrl_id);
5585 goto free_host;
5586 }
5587
5588 rc = pqi_add_sas_host(shost, ctrl_info);
5589 if (rc) {
5590 dev_err(&ctrl_info->pci_dev->dev,
5591 "add SAS host failed for controller %u\n",
5592 ctrl_info->ctrl_id);
5593 goto remove_host;
5594 }
5595
5596 ctrl_info->scsi_host = shost;
5597
5598 return 0;
5599
5600remove_host:
5601 scsi_remove_host(shost);
5602free_host:
5603 scsi_host_put(shost);
5604
5605 return rc;
5606}
5607
5608static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
5609{
5610 struct Scsi_Host *shost;
5611
5612 pqi_delete_sas_host(ctrl_info);
5613
5614 shost = ctrl_info->scsi_host;
5615 if (!shost)
5616 return;
5617
5618 scsi_remove_host(shost);
5619 scsi_host_put(shost);
5620}
5621
5622#define PQI_RESET_ACTION_RESET 0x1
5623
5624#define PQI_RESET_TYPE_NO_RESET 0x0
5625#define PQI_RESET_TYPE_SOFT_RESET 0x1
5626#define PQI_RESET_TYPE_FIRM_RESET 0x2
5627#define PQI_RESET_TYPE_HARD_RESET 0x3
5628
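/*
 * Issue a PQI hard reset through the device_reset register and wait for the
 * controller to report that it is ready for PQI mode again.
 */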
5629static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
5630{
5631 int rc;
5632 u32 reset_params;
5633
5634 reset_params = (PQI_RESET_ACTION_RESET << 5) |
5635 PQI_RESET_TYPE_HARD_RESET;
5636
5637 writel(reset_params,
5638 &ctrl_info->pqi_registers->device_reset);
5639
5640 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
5641 if (rc)
5642 dev_err(&ctrl_info->pci_dev->dev,
5643 "PQI reset failed\n");
5644
5645 return rc;
5646}
5647
5648static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
5649{
5650 int rc;
5651 struct bmic_identify_controller *identify;
5652
5653 identify = kmalloc(sizeof(*identify), GFP_KERNEL);
5654 if (!identify)
5655 return -ENOMEM;
5656
5657 rc = pqi_identify_controller(ctrl_info, identify);
5658 if (rc)
5659 goto out;
5660
5661 memcpy(ctrl_info->firmware_version, identify->firmware_version,
5662 sizeof(identify->firmware_version));
5663 ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
5664 snprintf(ctrl_info->firmware_version +
5665 strlen(ctrl_info->firmware_version),
5666 sizeof(ctrl_info->firmware_version) - strlen(ctrl_info->firmware_version),
5667 "-%u", get_unaligned_le16(&identify->firmware_build_number));
5668
5669out:
5670 kfree(identify);
5671
5672 return rc;
5673}
5674
Kevin Barnett162d7752017-05-03 18:52:46 -05005675/* Switches the controller from PQI mode back into SIS mode. */
5676
5677static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
5678{
5679 int rc;
5680
Kevin Barnett061ef062017-05-03 18:53:05 -05005681 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
Kevin Barnett162d7752017-05-03 18:52:46 -05005682 rc = pqi_reset(ctrl_info);
5683 if (rc)
5684 return rc;
5685 sis_reenable_sis_mode(ctrl_info);
5686 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
5687
5688 return 0;
5689}
5690
5691/*
5692 * If the controller isn't already in SIS mode, this function forces it into
5693 * SIS mode.
5694 */
5695
5696static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
Kevin Barnettff6abb72016-08-31 14:54:41 -05005697{
5698 if (!sis_is_firmware_running(ctrl_info))
5699 return -ENXIO;
5700
Kevin Barnett162d7752017-05-03 18:52:46 -05005701 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
5702 return 0;
5703
5704 if (sis_is_kernel_up(ctrl_info)) {
5705 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
5706 return 0;
Kevin Barnettff6abb72016-08-31 14:54:41 -05005707 }
5708
Kevin Barnett162d7752017-05-03 18:52:46 -05005709 return pqi_revert_to_sis_mode(ctrl_info);
Kevin Barnettff6abb72016-08-31 14:54:41 -05005710}
5711
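/*
 * Main controller bring-up path: force the controller into SIS mode, query
 * its properties and PQI capabilities, size and allocate the I/O resources,
 * transition the controller into PQI mode, create the admin and operational
 * queues, enable MSI-X interrupts and event reporting, and finally register
 * with the SCSI subsystem and kick off the initial device scan.
 */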
Kevin Barnett6c223762016-06-27 16:41:00 -05005712static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
5713{
5714 int rc;
5715
Kevin Barnett162d7752017-05-03 18:52:46 -05005716 rc = pqi_force_sis_mode(ctrl_info);
5717 if (rc)
5718 return rc;
Kevin Barnett6c223762016-06-27 16:41:00 -05005719
5720 /*
5721 * Wait until the controller is ready to start accepting SIS
5722 * commands.
5723 */
5724 rc = sis_wait_for_ctrl_ready(ctrl_info);
5725 if (rc) {
5726 dev_err(&ctrl_info->pci_dev->dev,
5727 "error initializing SIS interface\n");
5728 return rc;
5729 }
5730
5731 /*
5732 * Get the controller properties. This allows us to determine
5733 * whether or not it supports PQI mode.
5734 */
5735 rc = sis_get_ctrl_properties(ctrl_info);
5736 if (rc) {
5737 dev_err(&ctrl_info->pci_dev->dev,
5738 "error obtaining controller properties\n");
5739 return rc;
5740 }
5741
5742 rc = sis_get_pqi_capabilities(ctrl_info);
5743 if (rc) {
5744 dev_err(&ctrl_info->pci_dev->dev,
5745 "error obtaining controller capabilities\n");
5746 return rc;
5747 }
5748
5749 if (ctrl_info->max_outstanding_requests > PQI_MAX_OUTSTANDING_REQUESTS)
5750 ctrl_info->max_outstanding_requests =
5751 PQI_MAX_OUTSTANDING_REQUESTS;
5752
5753 pqi_calculate_io_resources(ctrl_info);
5754
5755 rc = pqi_alloc_error_buffer(ctrl_info);
5756 if (rc) {
5757 dev_err(&ctrl_info->pci_dev->dev,
5758 "failed to allocate PQI error buffer\n");
5759 return rc;
5760 }
5761
5762 /*
5763 * If the function we are about to call succeeds, the
5764 * controller will transition from legacy SIS mode
5765 * into PQI mode.
5766 */
5767 rc = sis_init_base_struct_addr(ctrl_info);
5768 if (rc) {
5769 dev_err(&ctrl_info->pci_dev->dev,
5770 "error initializing PQI mode\n");
5771 return rc;
5772 }
5773
5774 /* Wait for the controller to complete the SIS -> PQI transition. */
5775 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
5776 if (rc) {
5777 dev_err(&ctrl_info->pci_dev->dev,
5778 "transition to PQI mode failed\n");
5779 return rc;
5780 }
5781
5782 /* From here on, we are running in PQI mode. */
5783 ctrl_info->pqi_mode_enabled = true;
Kevin Barnettff6abb72016-08-31 14:54:41 -05005784 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
Kevin Barnett6c223762016-06-27 16:41:00 -05005785
5786 rc = pqi_alloc_admin_queues(ctrl_info);
5787 if (rc) {
5788 dev_err(&ctrl_info->pci_dev->dev,
5789 "error allocating admin queues\n");
5790 return rc;
5791 }
5792
5793 rc = pqi_create_admin_queues(ctrl_info);
5794 if (rc) {
5795 dev_err(&ctrl_info->pci_dev->dev,
5796 "error creating admin queues\n");
5797 return rc;
5798 }
5799
5800 rc = pqi_report_device_capability(ctrl_info);
5801 if (rc) {
5802 dev_err(&ctrl_info->pci_dev->dev,
5803 "obtaining device capability failed\n");
5804 return rc;
5805 }
5806
5807 rc = pqi_validate_device_capability(ctrl_info);
5808 if (rc)
5809 return rc;
5810
5811 pqi_calculate_queue_resources(ctrl_info);
5812
5813 rc = pqi_enable_msix_interrupts(ctrl_info);
5814 if (rc)
5815 return rc;
5816
5817 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
5818 ctrl_info->max_msix_vectors =
5819 ctrl_info->num_msix_vectors_enabled;
5820 pqi_calculate_queue_resources(ctrl_info);
5821 }
5822
5823 rc = pqi_alloc_io_resources(ctrl_info);
5824 if (rc)
5825 return rc;
5826
5827 rc = pqi_alloc_operational_queues(ctrl_info);
5828 if (rc)
5829 return rc;
5830
5831 pqi_init_operational_queues(ctrl_info);
5832
5833 rc = pqi_request_irqs(ctrl_info);
5834 if (rc)
5835 return rc;
5836
Kevin Barnett6c223762016-06-27 16:41:00 -05005837 rc = pqi_create_queues(ctrl_info);
5838 if (rc)
5839 return rc;
5840
Kevin Barnett061ef062017-05-03 18:53:05 -05005841 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
5842
5843 ctrl_info->controller_online = true;
5844 pqi_start_heartbeat_timer(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05005845
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05005846 rc = pqi_enable_events(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05005847 if (rc) {
5848 dev_err(&ctrl_info->pci_dev->dev,
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05005849 "error enabling events\n");
Kevin Barnett6c223762016-06-27 16:41:00 -05005850 return rc;
5851 }
5852
Kevin Barnett6c223762016-06-27 16:41:00 -05005853 /* Register with the SCSI subsystem. */
5854 rc = pqi_register_scsi(ctrl_info);
5855 if (rc)
5856 return rc;
5857
5858 rc = pqi_get_ctrl_firmware_version(ctrl_info);
5859 if (rc) {
5860 dev_err(&ctrl_info->pci_dev->dev,
5861 "error obtaining firmware version\n");
5862 return rc;
5863 }
5864
5865 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
5866 if (rc) {
5867 dev_err(&ctrl_info->pci_dev->dev,
5868 "error updating host wellness\n");
5869 return rc;
5870 }
5871
5872 pqi_schedule_update_time_worker(ctrl_info);
5873
5874 pqi_scan_scsi_devices(ctrl_info);
5875
5876 return 0;
5877}
5878
Kevin Barnett061ef062017-05-03 18:53:05 -05005879#if defined(CONFIG_PM)
5880
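/*
 * Reset the driver's cached producer/consumer indices (and the copies shared
 * with the hardware) for the admin, operational, and event queues so the
 * queues can be recreated cleanly on resume.
 */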
5881static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
5882{
5883 unsigned int i;
5884 struct pqi_admin_queues *admin_queues;
5885 struct pqi_event_queue *event_queue;
5886
5887 admin_queues = &ctrl_info->admin_queues;
5888 admin_queues->iq_pi_copy = 0;
5889 admin_queues->oq_ci_copy = 0;
5890 *admin_queues->oq_pi = 0;
5891
5892 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5893 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
5894 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
5895 ctrl_info->queue_groups[i].oq_ci_copy = 0;
5896
5897 *ctrl_info->queue_groups[i].iq_ci[RAID_PATH] = 0;
5898 *ctrl_info->queue_groups[i].iq_ci[AIO_PATH] = 0;
5899 *ctrl_info->queue_groups[i].oq_pi = 0;
5900 }
5901
5902 event_queue = &ctrl_info->event_queue;
5903 *event_queue->oq_pi = 0;
5904 event_queue->oq_ci_copy = 0;
5905}
5906
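/*
 * Reduced bring-up path used on resume: the queue memory and capability
 * information obtained at probe time are reused, so only the SIS -> PQI
 * transition, queue creation, interrupt mode, heartbeat, and event setup
 * are redone before rescanning for devices.
 */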
5907static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
5908{
5909 int rc;
5910
5911 rc = pqi_force_sis_mode(ctrl_info);
5912 if (rc)
5913 return rc;
5914
5915 /*
5916 * Wait until the controller is ready to start accepting SIS
5917 * commands.
5918 */
5919 rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
5920 if (rc)
5921 return rc;
5922
5923 /*
5924 * If the function we are about to call succeeds, the
5925 * controller will transition from legacy SIS mode
5926 * into PQI mode.
5927 */
5928 rc = sis_init_base_struct_addr(ctrl_info);
5929 if (rc) {
5930 dev_err(&ctrl_info->pci_dev->dev,
5931 "error initializing PQI mode\n");
5932 return rc;
5933 }
5934
5935 /* Wait for the controller to complete the SIS -> PQI transition. */
5936 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
5937 if (rc) {
5938 dev_err(&ctrl_info->pci_dev->dev,
5939 "transition to PQI mode failed\n");
5940 return rc;
5941 }
5942
5943 /* From here on, we are running in PQI mode. */
5944 ctrl_info->pqi_mode_enabled = true;
5945 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
5946
5947 pqi_reinit_queues(ctrl_info);
5948
5949 rc = pqi_create_admin_queues(ctrl_info);
5950 if (rc) {
5951 dev_err(&ctrl_info->pci_dev->dev,
5952 "error creating admin queues\n");
5953 return rc;
5954 }
5955
5956 rc = pqi_create_queues(ctrl_info);
5957 if (rc)
5958 return rc;
5959
5960 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
5961
5962 ctrl_info->controller_online = true;
5963 pqi_start_heartbeat_timer(ctrl_info);
5964 pqi_ctrl_unblock_requests(ctrl_info);
5965
5966 rc = pqi_enable_events(ctrl_info);
5967 if (rc) {
5968 dev_err(&ctrl_info->pci_dev->dev,
5969 "error configuring events\n");
5970 return rc;
5971 }
5972
5973 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
5974 if (rc) {
5975 dev_err(&ctrl_info->pci_dev->dev,
5976 "error updating host wellness\n");
5977 return rc;
5978 }
5979
5980 pqi_schedule_update_time_worker(ctrl_info);
5981
5982 pqi_scan_scsi_devices(ctrl_info);
5983
5984 return 0;
5985}
5986
5987#endif /* CONFIG_PM */
5988
Kevin Barnetta81ed5f32017-05-03 18:52:34 -05005989static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev,
5990 u16 timeout)
5991{
5992 return pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
5993 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
5994}
5995
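/*
 * PCI-level setup: enable the device, select a 64-bit DMA mask when possible
 * (32-bit otherwise), map the controller registers from BAR 0, lengthen the
 * PCIe completion timeout, and enable bus mastering.
 */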
Kevin Barnett6c223762016-06-27 16:41:00 -05005996static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
5997{
5998 int rc;
5999 u64 mask;
6000
6001 rc = pci_enable_device(ctrl_info->pci_dev);
6002 if (rc) {
6003 dev_err(&ctrl_info->pci_dev->dev,
6004 "failed to enable PCI device\n");
6005 return rc;
6006 }
6007
6008 if (sizeof(dma_addr_t) > 4)
6009 mask = DMA_BIT_MASK(64);
6010 else
6011 mask = DMA_BIT_MASK(32);
6012
6013 rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask);
6014 if (rc) {
6015 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
6016 goto disable_device;
6017 }
6018
6019 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
6020 if (rc) {
6021 dev_err(&ctrl_info->pci_dev->dev,
6022 "failed to obtain PCI resources\n");
6023 goto disable_device;
6024 }
6025
6026 ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
6027 ctrl_info->pci_dev, 0),
6028 sizeof(struct pqi_ctrl_registers));
6029 if (!ctrl_info->iomem_base) {
6030 dev_err(&ctrl_info->pci_dev->dev,
6031 "failed to map memory for controller registers\n");
6032 rc = -ENOMEM;
6033 goto release_regions;
6034 }
6035
6036 ctrl_info->registers = ctrl_info->iomem_base;
6037 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
6038
Kevin Barnetta81ed5f32017-05-03 18:52:34 -05006039#define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6
6040
6041 /* Increase the PCIe completion timeout. */
6042 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
6043 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
6044 if (rc) {
6045 dev_err(&ctrl_info->pci_dev->dev,
6046 "failed to set PCIe completion timeout\n");
6047 goto release_regions;
6048 }
6049
Kevin Barnett6c223762016-06-27 16:41:00 -05006050 /* Enable bus mastering. */
6051 pci_set_master(ctrl_info->pci_dev);
6052
6053 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
6054
6055 return 0;
6056
6057release_regions:
6058 pci_release_regions(ctrl_info->pci_dev);
6059disable_device:
6060 pci_disable_device(ctrl_info->pci_dev);
6061
6062 return rc;
6063}
6064
6065static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
6066{
6067 iounmap(ctrl_info->iomem_base);
6068 pci_release_regions(ctrl_info->pci_dev);
6069 pci_disable_device(ctrl_info->pci_dev);
6070 pci_set_drvdata(ctrl_info->pci_dev, NULL);
6071}
6072
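/*
 * Allocate the per-controller state on the requesting device's NUMA node and
 * initialize its locks, work items, wait queue, and the semaphore that
 * throttles synchronous requests.
 */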
6073static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
6074{
6075 struct pqi_ctrl_info *ctrl_info;
6076
6077 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
6078 GFP_KERNEL, numa_node);
6079 if (!ctrl_info)
6080 return NULL;
6081
6082 mutex_init(&ctrl_info->scan_mutex);
Kevin Barnett7561a7e2017-05-03 18:52:58 -05006083 mutex_init(&ctrl_info->lun_reset_mutex);
Kevin Barnett6c223762016-06-27 16:41:00 -05006084
6085 INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
6086 spin_lock_init(&ctrl_info->scsi_device_list_lock);
6087
6088 INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
6089 atomic_set(&ctrl_info->num_interrupts, 0);
6090
6091 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
6092 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
6093
6094 sema_init(&ctrl_info->sync_request_sem,
6095 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
Kevin Barnett7561a7e2017-05-03 18:52:58 -05006096 init_waitqueue_head(&ctrl_info->block_requests_wait);
Kevin Barnett6c223762016-06-27 16:41:00 -05006097
6098 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
Kevin Barnett061ef062017-05-03 18:53:05 -05006099 ctrl_info->irq_mode = IRQ_MODE_NONE;
Kevin Barnett6c223762016-06-27 16:41:00 -05006100 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
6101
6102 return ctrl_info;
6103}
6104
6105static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
6106{
6107 kfree(ctrl_info);
6108}
6109
6110static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
6111{
Kevin Barnett98bf0612017-05-03 18:52:28 -05006112 pqi_free_irqs(ctrl_info);
6113 pqi_disable_msix_interrupts(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05006114}
6115
6116static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
6117{
6118 pqi_stop_heartbeat_timer(ctrl_info);
6119 pqi_free_interrupts(ctrl_info);
6120 if (ctrl_info->queue_memory_base)
6121 dma_free_coherent(&ctrl_info->pci_dev->dev,
6122 ctrl_info->queue_memory_length,
6123 ctrl_info->queue_memory_base,
6124 ctrl_info->queue_memory_base_dma_handle);
6125 if (ctrl_info->admin_queue_memory_base)
6126 dma_free_coherent(&ctrl_info->pci_dev->dev,
6127 ctrl_info->admin_queue_memory_length,
6128 ctrl_info->admin_queue_memory_base,
6129 ctrl_info->admin_queue_memory_base_dma_handle);
6130 pqi_free_all_io_requests(ctrl_info);
6131 if (ctrl_info->error_buffer)
6132 dma_free_coherent(&ctrl_info->pci_dev->dev,
6133 ctrl_info->error_buffer_length,
6134 ctrl_info->error_buffer,
6135 ctrl_info->error_buffer_dma_handle);
6136 if (ctrl_info->iomem_base)
6137 pqi_cleanup_pci_init(ctrl_info);
6138 pqi_free_ctrl_info(ctrl_info);
6139}
6140
6141static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
6142{
Kevin Barnett061ef062017-05-03 18:53:05 -05006143 pqi_cancel_rescan_worker(ctrl_info);
6144 pqi_cancel_update_time_worker(ctrl_info);
Kevin Barnette57a1f92016-08-31 14:54:47 -05006145 pqi_remove_all_scsi_devices(ctrl_info);
6146 pqi_unregister_scsi(ctrl_info);
Kevin Barnett162d7752017-05-03 18:52:46 -05006147 if (ctrl_info->pqi_mode_enabled)
6148 pqi_revert_to_sis_mode(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05006149 pqi_free_ctrl_resources(ctrl_info);
6150}
6151
6152static void pqi_print_ctrl_info(struct pci_dev *pdev,
6153 const struct pci_device_id *id)
6154{
6155 char *ctrl_description;
6156
6157 if (id->driver_data) {
6158 ctrl_description = (char *)id->driver_data;
6159 } else {
6160 switch (id->subvendor) {
6161 case PCI_VENDOR_ID_HP:
6162 ctrl_description = hpe_branded_controller;
6163 break;
6164 case PCI_VENDOR_ID_ADAPTEC2:
6165 default:
6166 ctrl_description = microsemi_branded_controller;
6167 break;
6168 }
6169 }
6170
6171 dev_info(&pdev->dev, "%s found\n", ctrl_description);
6172}
6173
6174static int pqi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
6175{
6176 int rc;
6177 int node;
6178 struct pqi_ctrl_info *ctrl_info;
6179
6180 pqi_print_ctrl_info(pdev, id);
6181
6182 if (pqi_disable_device_id_wildcards &&
6183 id->subvendor == PCI_ANY_ID &&
6184 id->subdevice == PCI_ANY_ID) {
6185 dev_warn(&pdev->dev,
6186 "controller not probed because device ID wildcards are disabled\n");
6187 return -ENODEV;
6188 }
6189
6190 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
6191 dev_warn(&pdev->dev,
6192 "controller device ID matched using wildcards\n");
6193
6194 node = dev_to_node(&pdev->dev);
6195 if (node == NUMA_NO_NODE)
6196 set_dev_node(&pdev->dev, 0);
6197
6198 ctrl_info = pqi_alloc_ctrl_info(node);
6199 if (!ctrl_info) {
6200 dev_err(&pdev->dev,
6201 "failed to allocate controller info block\n");
6202 return -ENOMEM;
6203 }
6204
6205 ctrl_info->pci_dev = pdev;
6206
6207 rc = pqi_pci_init(ctrl_info);
6208 if (rc)
6209 goto error;
6210
6211 rc = pqi_ctrl_init(ctrl_info);
6212 if (rc)
6213 goto error;
6214
6215 return 0;
6216
6217error:
6218 pqi_remove_ctrl(ctrl_info);
6219
6220 return rc;
6221}
6222
6223static void pqi_pci_remove(struct pci_dev *pdev)
6224{
6225 struct pqi_ctrl_info *ctrl_info;
6226
6227 ctrl_info = pci_get_drvdata(pdev);
6228 if (!ctrl_info)
6229 return;
6230
6231 pqi_remove_ctrl(ctrl_info);
6232}
6233
6234static void pqi_shutdown(struct pci_dev *pdev)
6235{
6236 int rc;
6237 struct pqi_ctrl_info *ctrl_info;
6238
6239 ctrl_info = pci_get_drvdata(pdev);
6240 if (!ctrl_info)
6241 goto error;
6242
6243 /*
6244 * Write all data in the controller's battery-backed cache to
6245 * storage.
6246 */
6247 rc = pqi_flush_cache(ctrl_info);
6248 if (rc == 0)
6249 return;
6250
6251error:
6252 dev_warn(&pdev->dev,
6253 "unable to flush controller cache\n");
6254}
6255
Kevin Barnett061ef062017-05-03 18:53:05 -05006256#if defined(CONFIG_PM)
6257
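/*
 * Suspend: stop event delivery and the background workers, wait for any
 * in-flight scan or LUN reset to finish, flush the controller cache, block
 * and drain outstanding I/O, and stop the heartbeat timer. For a freeze
 * event the device is left powered; otherwise the PCI state is saved and
 * the device is placed in the requested low-power state.
 */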
6258static int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state)
6259{
6260 struct pqi_ctrl_info *ctrl_info;
6261
6262 ctrl_info = pci_get_drvdata(pci_dev);
6263
6264 pqi_disable_events(ctrl_info);
6265 pqi_cancel_update_time_worker(ctrl_info);
6266 pqi_cancel_rescan_worker(ctrl_info);
6267 pqi_wait_until_scan_finished(ctrl_info);
6268 pqi_wait_until_lun_reset_finished(ctrl_info);
6269 pqi_flush_cache(ctrl_info);
6270 pqi_ctrl_block_requests(ctrl_info);
6271 pqi_ctrl_wait_until_quiesced(ctrl_info);
6272 pqi_wait_until_inbound_queues_empty(ctrl_info);
6273 pqi_ctrl_wait_for_pending_io(ctrl_info);
6274 pqi_stop_heartbeat_timer(ctrl_info);
6275
6276 if (state.event == PM_EVENT_FREEZE)
6277 return 0;
6278
6279 pci_save_state(pci_dev);
6280 pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
6281
6282 ctrl_info->controller_online = false;
6283 ctrl_info->pqi_mode_enabled = false;
6284
6285 return 0;
6286}
6287
6288static int pqi_resume(struct pci_dev *pci_dev)
6289{
6290 int rc;
6291 struct pqi_ctrl_info *ctrl_info;
6292
6293 ctrl_info = pci_get_drvdata(pci_dev);
6294
6295 if (pci_dev->current_state != PCI_D0) {
6296 ctrl_info->max_hw_queue_index = 0;
6297 pqi_free_interrupts(ctrl_info);
6298 pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);
6299 rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler,
6300 IRQF_SHARED, DRIVER_NAME_SHORT,
6301 &ctrl_info->queue_groups[0]);
6302 if (rc) {
6303 dev_err(&ctrl_info->pci_dev->dev,
6304 "irq %u init failed with error %d\n",
6305 pci_dev->irq, rc);
6306 return rc;
6307 }
6308 pqi_start_heartbeat_timer(ctrl_info);
6309 pqi_ctrl_unblock_requests(ctrl_info);
6310 return 0;
6311 }
6312
6313 pci_set_power_state(pci_dev, PCI_D0);
6314 pci_restore_state(pci_dev);
6315
6316 return pqi_ctrl_init_resume(ctrl_info);
6317}
6318
6319#endif /* CONFIG_PM */
6320
Kevin Barnett6c223762016-06-27 16:41:00 -05006321/* Define the PCI IDs for the controllers that we support. */
6322static const struct pci_device_id pqi_pci_id_table[] = {
6323 {
6324 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6325 PCI_VENDOR_ID_ADAPTEC2, 0x0110)
6326 },
6327 {
6328 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6329 PCI_VENDOR_ID_HP, 0x0600)
6330 },
6331 {
6332 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6333 PCI_VENDOR_ID_HP, 0x0601)
6334 },
6335 {
6336 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6337 PCI_VENDOR_ID_HP, 0x0602)
6338 },
6339 {
6340 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6341 PCI_VENDOR_ID_HP, 0x0603)
6342 },
6343 {
6344 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6345 PCI_VENDOR_ID_HP, 0x0650)
6346 },
6347 {
6348 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6349 PCI_VENDOR_ID_HP, 0x0651)
6350 },
6351 {
6352 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6353 PCI_VENDOR_ID_HP, 0x0652)
6354 },
6355 {
6356 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6357 PCI_VENDOR_ID_HP, 0x0653)
6358 },
6359 {
6360 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6361 PCI_VENDOR_ID_HP, 0x0654)
6362 },
6363 {
6364 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6365 PCI_VENDOR_ID_HP, 0x0655)
6366 },
6367 {
6368 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6369 PCI_VENDOR_ID_HP, 0x0700)
6370 },
6371 {
6372 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6373 PCI_VENDOR_ID_HP, 0x0701)
6374 },
6375 {
6376 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6377 PCI_VENDOR_ID_ADAPTEC2, 0x0800)
6378 },
6379 {
6380 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6381 PCI_VENDOR_ID_ADAPTEC2, 0x0801)
6382 },
6383 {
6384 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6385 PCI_VENDOR_ID_ADAPTEC2, 0x0802)
6386 },
6387 {
6388 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6389 PCI_VENDOR_ID_ADAPTEC2, 0x0803)
6390 },
6391 {
6392 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6393 PCI_VENDOR_ID_ADAPTEC2, 0x0804)
6394 },
6395 {
6396 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6397 PCI_VENDOR_ID_ADAPTEC2, 0x0805)
6398 },
6399 {
6400 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6401 PCI_VENDOR_ID_ADAPTEC2, 0x0900)
6402 },
6403 {
6404 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6405 PCI_VENDOR_ID_ADAPTEC2, 0x0901)
6406 },
6407 {
6408 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6409 PCI_VENDOR_ID_ADAPTEC2, 0x0902)
6410 },
6411 {
6412 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6413 PCI_VENDOR_ID_ADAPTEC2, 0x0903)
6414 },
6415 {
6416 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6417 PCI_VENDOR_ID_ADAPTEC2, 0x0904)
6418 },
6419 {
6420 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6421 PCI_VENDOR_ID_ADAPTEC2, 0x0905)
6422 },
6423 {
6424 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6425 PCI_VENDOR_ID_ADAPTEC2, 0x0906)
6426 },
6427 {
6428 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6429 PCI_VENDOR_ID_HP, 0x1001)
6430 },
6431 {
6432 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6433 PCI_VENDOR_ID_HP, 0x1100)
6434 },
6435 {
6436 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6437 PCI_VENDOR_ID_HP, 0x1101)
6438 },
6439 {
6440 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6441 PCI_VENDOR_ID_HP, 0x1102)
6442 },
6443 {
6444 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6445 PCI_VENDOR_ID_HP, 0x1150)
6446 },
6447 {
6448 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6449 PCI_ANY_ID, PCI_ANY_ID)
6450 },
6451 { 0 }
6452};
6453
6454MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
6455
6456static struct pci_driver pqi_pci_driver = {
6457 .name = DRIVER_NAME_SHORT,
6458 .id_table = pqi_pci_id_table,
6459 .probe = pqi_pci_probe,
6460 .remove = pqi_pci_remove,
6461 .shutdown = pqi_shutdown,
Kevin Barnett061ef062017-05-03 18:53:05 -05006462#if defined(CONFIG_PM)
6463 .suspend = pqi_suspend,
6464 .resume = pqi_resume,
6465#endif
Kevin Barnett6c223762016-06-27 16:41:00 -05006466};
6467
6468static int __init pqi_init(void)
6469{
6470 int rc;
6471
6472 pr_info(DRIVER_NAME "\n");
6473
6474 pqi_sas_transport_template =
6475 sas_attach_transport(&pqi_sas_transport_functions);
6476 if (!pqi_sas_transport_template)
6477 return -ENODEV;
6478
6479 rc = pci_register_driver(&pqi_pci_driver);
6480 if (rc)
6481 sas_release_transport(pqi_sas_transport_template);
6482
6483 return rc;
6484}
6485
6486static void __exit pqi_cleanup(void)
6487{
6488 pci_unregister_driver(&pqi_pci_driver);
6489 sas_release_transport(pqi_sas_transport_template);
6490}
6491
6492module_init(pqi_init);
6493module_exit(pqi_cleanup);
6494
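/*
 * Compile-time sanity checks: BUILD_BUG_ON() verifies that the offsets and
 * sizes of the on-the-wire SIS/PQI structures match their expected layout,
 * so any accidental structure change breaks the build.
 */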
6495static void __attribute__((unused)) verify_structures(void)
6496{
6497 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6498 sis_host_to_ctrl_doorbell) != 0x20);
6499 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6500 sis_interrupt_mask) != 0x34);
6501 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6502 sis_ctrl_to_host_doorbell) != 0x9c);
6503 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6504 sis_ctrl_to_host_doorbell_clear) != 0xa0);
6505 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
Kevin Barnettff6abb72016-08-31 14:54:41 -05006506 sis_driver_scratch) != 0xb0);
6507 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
Kevin Barnett6c223762016-06-27 16:41:00 -05006508 sis_firmware_status) != 0xbc);
6509 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6510 sis_mailbox) != 0x1000);
6511 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6512 pqi_registers) != 0x4000);
6513
6514 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
6515 iu_type) != 0x0);
6516 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
6517 iu_length) != 0x2);
6518 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
6519 response_queue_id) != 0x4);
6520 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
6521 work_area) != 0x6);
6522 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
6523
6524 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6525 status) != 0x0);
6526 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6527 service_response) != 0x1);
6528 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6529 data_present) != 0x2);
6530 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6531 reserved) != 0x3);
6532 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6533 residual_count) != 0x4);
6534 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6535 data_length) != 0x8);
6536 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6537 reserved1) != 0xa);
6538 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6539 data) != 0xc);
6540 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
6541
6542 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6543 data_in_result) != 0x0);
6544 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6545 data_out_result) != 0x1);
6546 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6547 reserved) != 0x2);
6548 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6549 status) != 0x5);
6550 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6551 status_qualifier) != 0x6);
6552 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6553 sense_data_length) != 0x8);
6554 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6555 response_data_length) != 0xa);
6556 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6557 data_in_transferred) != 0xc);
6558 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6559 data_out_transferred) != 0x10);
6560 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6561 data) != 0x14);
6562 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
6563
6564 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6565 signature) != 0x0);
6566 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6567 function_and_status_code) != 0x8);
6568 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6569 max_admin_iq_elements) != 0x10);
6570 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6571 max_admin_oq_elements) != 0x11);
6572 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6573 admin_iq_element_length) != 0x12);
6574 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6575 admin_oq_element_length) != 0x13);
6576 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6577 max_reset_timeout) != 0x14);
6578 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6579 legacy_intx_status) != 0x18);
6580 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6581 legacy_intx_mask_set) != 0x1c);
6582 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6583 legacy_intx_mask_clear) != 0x20);
6584 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6585 device_status) != 0x40);
6586 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6587 admin_iq_pi_offset) != 0x48);
6588 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6589 admin_oq_ci_offset) != 0x50);
6590 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6591 admin_iq_element_array_addr) != 0x58);
6592 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6593 admin_oq_element_array_addr) != 0x60);
6594 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6595 admin_iq_ci_addr) != 0x68);
6596 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6597 admin_oq_pi_addr) != 0x70);
6598 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6599 admin_iq_num_elements) != 0x78);
6600 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6601 admin_oq_num_elements) != 0x79);
6602 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6603 admin_queue_int_msg_num) != 0x7a);
6604 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6605 device_error) != 0x80);
6606 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6607 error_details) != 0x88);
6608 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6609 device_reset) != 0x90);
6610 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6611 power_action) != 0x94);
6612 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
6613
6614 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6615 header.iu_type) != 0);
6616 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6617 header.iu_length) != 2);
6618 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6619 header.work_area) != 6);
6620 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6621 request_id) != 8);
6622 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6623 function_code) != 10);
6624 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6625 data.report_device_capability.buffer_length) != 44);
6626 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6627 data.report_device_capability.sg_descriptor) != 48);
6628 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6629 data.create_operational_iq.queue_id) != 12);
6630 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6631 data.create_operational_iq.element_array_addr) != 16);
6632 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6633 data.create_operational_iq.ci_addr) != 24);
6634 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6635 data.create_operational_iq.num_elements) != 32);
6636 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6637 data.create_operational_iq.element_length) != 34);
6638 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6639 data.create_operational_iq.queue_protocol) != 36);
6640 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6641 data.create_operational_oq.queue_id) != 12);
6642 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6643 data.create_operational_oq.element_array_addr) != 16);
6644 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6645 data.create_operational_oq.pi_addr) != 24);
6646 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6647 data.create_operational_oq.num_elements) != 32);
6648 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6649 data.create_operational_oq.element_length) != 34);
6650 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6651 data.create_operational_oq.queue_protocol) != 36);
6652 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6653 data.create_operational_oq.int_msg_num) != 40);
6654 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6655 data.create_operational_oq.coalescing_count) != 42);
6656 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6657 data.create_operational_oq.min_coalescing_time) != 44);
6658 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6659 data.create_operational_oq.max_coalescing_time) != 48);
6660 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6661 data.delete_operational_queue.queue_id) != 12);
6662 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
6663 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6664 data.create_operational_iq) != 64 - 11);
6665 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6666 data.create_operational_oq) != 64 - 11);
6667 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6668 data.delete_operational_queue) != 64 - 11);
6669
6670 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6671 header.iu_type) != 0);
6672 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6673 header.iu_length) != 2);
6674 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6675 header.work_area) != 6);
6676 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6677 request_id) != 8);
6678 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6679 function_code) != 10);
6680 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6681 status) != 11);
6682 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6683 data.create_operational_iq.status_descriptor) != 12);
6684 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6685 data.create_operational_iq.iq_pi_offset) != 16);
6686 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6687 data.create_operational_oq.status_descriptor) != 12);
6688 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6689 data.create_operational_oq.oq_ci_offset) != 16);
6690 BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
6691
6692 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6693 header.iu_type) != 0);
6694 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6695 header.iu_length) != 2);
6696 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6697 header.response_queue_id) != 4);
6698 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6699 header.work_area) != 6);
6700 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6701 request_id) != 8);
6702 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6703 nexus_id) != 10);
6704 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6705 buffer_length) != 12);
6706 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6707 lun_number) != 16);
6708 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6709 protocol_specific) != 24);
6710 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6711 error_index) != 27);
6712 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6713 cdb) != 32);
6714 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6715 sg_descriptors) != 64);
6716 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
6717 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
6718
6719 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6720 header.iu_type) != 0);
6721 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6722 header.iu_length) != 2);
6723 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6724 header.response_queue_id) != 4);
6725 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6726 header.work_area) != 6);
6727 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6728 request_id) != 8);
6729 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6730 nexus_id) != 12);
6731 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6732 buffer_length) != 16);
6733 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6734 data_encryption_key_index) != 22);
6735 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6736 encrypt_tweak_lower) != 24);
6737 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6738 encrypt_tweak_upper) != 28);
6739 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6740 cdb) != 32);
6741 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6742 error_index) != 48);
6743 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6744 num_sg_descriptors) != 50);
6745 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6746 cdb_length) != 51);
6747 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6748 lun_number) != 52);
6749 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6750 sg_descriptors) != 64);
6751 BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
6752 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
6753
6754 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6755 header.iu_type) != 0);
6756 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6757 header.iu_length) != 2);
6758 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6759 request_id) != 8);
6760 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6761 error_index) != 10);
6762
6763 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6764 header.iu_type) != 0);
6765 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6766 header.iu_length) != 2);
6767 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6768 header.response_queue_id) != 4);
6769 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6770 request_id) != 8);
6771 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6772 data.report_event_configuration.buffer_length) != 12);
6773 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6774 data.report_event_configuration.sg_descriptors) != 16);
6775 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6776 data.set_event_configuration.global_event_oq_id) != 10);
6777 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6778 data.set_event_configuration.buffer_length) != 12);
6779 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6780 data.set_event_configuration.sg_descriptors) != 16);
6781
6782 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
6783 max_inbound_iu_length) != 6);
6784 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
6785 max_outbound_iu_length) != 14);
6786 BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
6787
6788 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6789 data_length) != 0);
6790 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6791 iq_arbitration_priority_support_bitmask) != 8);
6792 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6793 maximum_aw_a) != 9);
6794 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6795 maximum_aw_b) != 10);
6796 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6797 maximum_aw_c) != 11);
6798 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6799 max_inbound_queues) != 16);
6800 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6801 max_elements_per_iq) != 18);
6802 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6803 max_iq_element_length) != 24);
6804 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6805 min_iq_element_length) != 26);
6806 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6807 max_outbound_queues) != 30);
6808 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6809 max_elements_per_oq) != 32);
6810 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6811 intr_coalescing_time_granularity) != 34);
6812 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6813 max_oq_element_length) != 36);
6814 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6815 min_oq_element_length) != 38);
6816 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6817 iu_layer_descriptors) != 64);
6818 BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
6819
6820 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
6821 event_type) != 0);
6822 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
6823 oq_id) != 2);
6824 BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);
6825
6826 BUILD_BUG_ON(offsetof(struct pqi_event_config,
6827 num_event_descriptors) != 2);
6828 BUILD_BUG_ON(offsetof(struct pqi_event_config,
6829 descriptors) != 4);
6830
Kevin Barnett061ef062017-05-03 18:53:05 -05006831 BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
6832 ARRAY_SIZE(pqi_supported_event_types));
6833
Kevin Barnett6c223762016-06-27 16:41:00 -05006834 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6835 header.iu_type) != 0);
6836 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6837 header.iu_length) != 2);
6838 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6839 event_type) != 8);
6840 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6841 event_id) != 10);
6842 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6843 additional_event_id) != 12);
6844 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6845 data) != 16);
6846 BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
6847
6848 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6849 header.iu_type) != 0);
6850 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6851 header.iu_length) != 2);
6852 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6853 event_type) != 8);
6854 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6855 event_id) != 10);
6856 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6857 additional_event_id) != 12);
6858 BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
6859
6860 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6861 header.iu_type) != 0);
6862 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6863 header.iu_length) != 2);
6864 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6865 request_id) != 8);
6866 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6867 nexus_id) != 10);
6868 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6869 lun_number) != 16);
6870 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6871 protocol_specific) != 24);
6872 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6873 outbound_queue_id_to_manage) != 26);
6874 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6875 request_id_to_manage) != 28);
6876 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6877 task_management_function) != 30);
6878 BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);
6879
6880 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6881 header.iu_type) != 0);
6882 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6883 header.iu_length) != 2);
6884 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6885 request_id) != 8);
6886 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6887 nexus_id) != 10);
6888 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6889 additional_response_info) != 12);
6890 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6891 response_code) != 15);
6892 BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
6893
6894 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6895 configured_logical_drive_count) != 0);
6896 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6897 configuration_signature) != 1);
6898 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6899 firmware_version) != 5);
6900 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6901 extended_logical_unit_count) != 154);
6902 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6903 firmware_build_number) != 190);
6904 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6905 controller_mode) != 292);
6906
6907 BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
6908 BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
6909 BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
6910 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6911 BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
6912 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6913 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
6914 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
6915 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6916 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
6917 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
6918 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6919
6920 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
6921}