1/*
2 * driver for Microsemi PQI-based storage controllers
3 * Copyright (c) 2016 Microsemi Corporation
4 * Copyright (c) 2016 PMC-Sierra, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
16 *
17 */
18
19#include <linux/module.h>
20#include <linux/kernel.h>
21#include <linux/pci.h>
22#include <linux/delay.h>
23#include <linux/interrupt.h>
24#include <linux/sched.h>
25#include <linux/rtc.h>
26#include <linux/bcd.h>
27#include <linux/cciss_ioctl.h>
28#include <linux/blk-mq-pci.h>
29#include <scsi/scsi_host.h>
30#include <scsi/scsi_cmnd.h>
31#include <scsi/scsi_device.h>
32#include <scsi/scsi_eh.h>
33#include <scsi/scsi_transport_sas.h>
34#include <asm/unaligned.h>
35#include "smartpqi.h"
36#include "smartpqi_sis.h"
37
38#if !defined(BUILD_TIMESTAMP)
39#define BUILD_TIMESTAMP
40#endif
41
42#define DRIVER_VERSION "0.9.13-370"
43#define DRIVER_MAJOR 0
44#define DRIVER_MINOR 9
45#define DRIVER_RELEASE 13
46#define DRIVER_REVISION 370
47
48#define DRIVER_NAME "Microsemi PQI Driver (v" DRIVER_VERSION ")"
49#define DRIVER_NAME_SHORT "smartpqi"
50
51MODULE_AUTHOR("Microsemi");
52MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
53 DRIVER_VERSION);
54MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
55MODULE_VERSION(DRIVER_VERSION);
56MODULE_LICENSE("GPL");
57
58#define PQI_ENABLE_MULTI_QUEUE_SUPPORT 0
59
60static char *hpe_branded_controller = "HPE Smart Array Controller";
61static char *microsemi_branded_controller = "Microsemi Smart Family Controller";
62
63static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
64static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
65static void pqi_scan_start(struct Scsi_Host *shost);
66static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
67 struct pqi_queue_group *queue_group, enum pqi_io_path path,
68 struct pqi_io_request *io_request);
69static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
70 struct pqi_iu_header *request, unsigned int flags,
71 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
72static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
73 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
74 unsigned int cdb_length, struct pqi_queue_group *queue_group,
75 struct pqi_encryption_info *encryption_info);
76
77/* for flags argument to pqi_submit_raid_request_synchronous() */
78#define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1
79
80static struct scsi_transport_template *pqi_sas_transport_template;
81
82static atomic_t pqi_controller_count = ATOMIC_INIT(0);
83
84static unsigned int pqi_supported_event_types[] = {
85 PQI_EVENT_TYPE_HOTPLUG,
86 PQI_EVENT_TYPE_HARDWARE,
87 PQI_EVENT_TYPE_PHYSICAL_DEVICE,
88 PQI_EVENT_TYPE_LOGICAL_DEVICE,
89 PQI_EVENT_TYPE_AIO_STATE_CHANGE,
90 PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
91};
92
93static int pqi_disable_device_id_wildcards;
94module_param_named(disable_device_id_wildcards,
95 pqi_disable_device_id_wildcards, int, S_IRUGO | S_IWUSR);
96MODULE_PARM_DESC(disable_device_id_wildcards,
97 "Disable device ID wildcards.");
98
99static char *raid_levels[] = {
100 "RAID-0",
101 "RAID-4",
102 "RAID-1(1+0)",
103 "RAID-5",
104 "RAID-5+1",
105 "RAID-ADG",
106 "RAID-1(ADM)",
107};
108
109static char *pqi_raid_level_to_string(u8 raid_level)
110{
111 if (raid_level < ARRAY_SIZE(raid_levels))
112 return raid_levels[raid_level];
113
114 return "";
115}
116
117#define SA_RAID_0 0
118#define SA_RAID_4 1
119#define SA_RAID_1 2 /* also used for RAID 10 */
120#define SA_RAID_5 3 /* also used for RAID 50 */
121#define SA_RAID_51 4
122#define SA_RAID_6 5 /* also used for RAID 60 */
123#define SA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
124#define SA_RAID_MAX SA_RAID_ADM
125#define SA_RAID_UNKNOWN 0xff
126
127static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
128{
129 pqi_prep_for_scsi_done(scmd);
130 scmd->scsi_done(scmd);
131}
132
133static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
134{
135 return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
136}
137
138static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
139{
140 void *hostdata = shost_priv(shost);
141
142 return *((struct pqi_ctrl_info **)hostdata);
143}
144
145static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
146{
147 return !device->is_physical_device;
148}
149
150static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
151{
152 return !ctrl_info->controller_online;
153}
154
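/* Take the controller offline if its firmware is no longer running. */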
155static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
156{
157 if (ctrl_info->controller_online)
158 if (!sis_is_firmware_running(ctrl_info))
159 pqi_take_ctrl_offline(ctrl_info);
160}
161
162static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
163{
164 return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
165}
166
167static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
168 struct pqi_ctrl_info *ctrl_info)
169{
170 return sis_read_driver_scratch(ctrl_info);
171}
172
173static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
174 enum pqi_ctrl_mode mode)
175{
176 sis_write_driver_scratch(ctrl_info, mode);
177}
178
179#define PQI_RESCAN_WORK_INTERVAL (10 * HZ)
180static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
181{
182 ctrl_info->block_requests = true;
183 scsi_block_requests(ctrl_info->scsi_host);
184}
185
186static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
187{
188 ctrl_info->block_requests = false;
189 wake_up_all(&ctrl_info->block_requests_wait);
190 scsi_unblock_requests(ctrl_info->scsi_host);
191}
192
193static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
194{
195 return ctrl_info->block_requests;
196}
197
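/*
 * Wait (optionally with a timeout) until the controller stops blocking
 * new requests; returns how many milliseconds of the timeout remain.
 */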
198static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
199 unsigned long timeout_msecs)
200{
201 unsigned long remaining_msecs;
202
203 if (!pqi_ctrl_blocked(ctrl_info))
204 return timeout_msecs;
205
206 atomic_inc(&ctrl_info->num_blocked_threads);
207
208 if (timeout_msecs == NO_TIMEOUT) {
209 wait_event(ctrl_info->block_requests_wait,
210 !pqi_ctrl_blocked(ctrl_info));
211 remaining_msecs = timeout_msecs;
212 } else {
213 unsigned long remaining_jiffies;
214
215 remaining_jiffies =
216 wait_event_timeout(ctrl_info->block_requests_wait,
217 !pqi_ctrl_blocked(ctrl_info),
218 msecs_to_jiffies(timeout_msecs));
219 remaining_msecs = jiffies_to_msecs(remaining_jiffies);
220 }
221
222 atomic_dec(&ctrl_info->num_blocked_threads);
223
224 return remaining_msecs;
225}
226
227static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
228{
229 atomic_inc(&ctrl_info->num_busy_threads);
230}
231
232static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
233{
234 atomic_dec(&ctrl_info->num_busy_threads);
235}
236
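/* Wait until every busy thread that is not itself blocked has drained. */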
237static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
238{
239 while (atomic_read(&ctrl_info->num_busy_threads) >
240 atomic_read(&ctrl_info->num_blocked_threads))
241 usleep_range(1000, 2000);
242}
243
244static inline void pqi_device_reset_start(struct pqi_scsi_dev *device)
245{
246 device->in_reset = true;
247}
248
249static inline void pqi_device_reset_done(struct pqi_scsi_dev *device)
250{
251 device->in_reset = false;
252}
253
254static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
255{
256 return device->in_reset;
257}
258
259static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
260{
261 schedule_delayed_work(&ctrl_info->rescan_work,
262 PQI_RESCAN_WORK_INTERVAL);
263}
264
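/* DMA-map a single buffer and describe it with one CISS SG descriptor. */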
265static int pqi_map_single(struct pci_dev *pci_dev,
266 struct pqi_sg_descriptor *sg_descriptor, void *buffer,
267 size_t buffer_length, int data_direction)
268{
269 dma_addr_t bus_address;
270
271 if (!buffer || buffer_length == 0 || data_direction == PCI_DMA_NONE)
272 return 0;
273
274 bus_address = pci_map_single(pci_dev, buffer, buffer_length,
275 data_direction);
276 if (pci_dma_mapping_error(pci_dev, bus_address))
277 return -ENOMEM;
278
279 put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
280 put_unaligned_le32(buffer_length, &sg_descriptor->length);
281 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
282
283 return 0;
284}
285
286static void pqi_pci_unmap(struct pci_dev *pci_dev,
287 struct pqi_sg_descriptor *descriptors, int num_descriptors,
288 int data_direction)
289{
290 int i;
291
292 if (data_direction == PCI_DMA_NONE)
293 return;
294
295 for (i = 0; i < num_descriptors; i++)
296 pci_unmap_single(pci_dev,
297 (dma_addr_t)get_unaligned_le64(&descriptors[i].address),
298 get_unaligned_le32(&descriptors[i].length),
299 data_direction);
300}
301
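/*
 * Build a RAID-path pass-through request: fill in the IU header, build the
 * CISS/BMIC CDB for the given command, and DMA-map the data buffer.
 */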
302static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
303 struct pqi_raid_path_request *request, u8 cmd,
304 u8 *scsi3addr, void *buffer, size_t buffer_length,
305 u16 vpd_page, int *pci_direction)
306{
307 u8 *cdb;
308 int pci_dir;
309
310 memset(request, 0, sizeof(*request));
311
312 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
313 put_unaligned_le16(offsetof(struct pqi_raid_path_request,
314 sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
315 &request->header.iu_length);
316 put_unaligned_le32(buffer_length, &request->buffer_length);
317 memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
318 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
319 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
320
321 cdb = request->cdb;
322
323 switch (cmd) {
324 case INQUIRY:
325 request->data_direction = SOP_READ_FLAG;
326 cdb[0] = INQUIRY;
327 if (vpd_page & VPD_PAGE) {
328 cdb[1] = 0x1;
329 cdb[2] = (u8)vpd_page;
330 }
331 cdb[4] = (u8)buffer_length;
332 break;
333 case CISS_REPORT_LOG:
334 case CISS_REPORT_PHYS:
335 request->data_direction = SOP_READ_FLAG;
336 cdb[0] = cmd;
337 if (cmd == CISS_REPORT_PHYS)
338 cdb[1] = CISS_REPORT_PHYS_EXTENDED;
339 else
340 cdb[1] = CISS_REPORT_LOG_EXTENDED;
341 put_unaligned_be32(buffer_length, &cdb[6]);
342 break;
343 case CISS_GET_RAID_MAP:
344 request->data_direction = SOP_READ_FLAG;
345 cdb[0] = CISS_READ;
346 cdb[1] = CISS_GET_RAID_MAP;
347 put_unaligned_be32(buffer_length, &cdb[6]);
348 break;
349 case SA_CACHE_FLUSH:
350 request->data_direction = SOP_WRITE_FLAG;
351 cdb[0] = BMIC_WRITE;
352 cdb[6] = BMIC_CACHE_FLUSH;
353 put_unaligned_be16(buffer_length, &cdb[7]);
354 break;
355 case BMIC_IDENTIFY_CONTROLLER:
356 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
357 request->data_direction = SOP_READ_FLAG;
358 cdb[0] = BMIC_READ;
359 cdb[6] = cmd;
360 put_unaligned_be16(buffer_length, &cdb[7]);
361 break;
362 case BMIC_WRITE_HOST_WELLNESS:
363 request->data_direction = SOP_WRITE_FLAG;
364 cdb[0] = BMIC_WRITE;
365 cdb[6] = cmd;
366 put_unaligned_be16(buffer_length, &cdb[7]);
367 break;
368 default:
369 dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n",
370 cmd);
371 WARN_ON(cmd);
372 break;
373 }
374
375 switch (request->data_direction) {
376 case SOP_READ_FLAG:
377 pci_dir = PCI_DMA_FROMDEVICE;
378 break;
379 case SOP_WRITE_FLAG:
380 pci_dir = PCI_DMA_TODEVICE;
381 break;
382 case SOP_NO_DIRECTION_FLAG:
383 pci_dir = PCI_DMA_NONE;
384 break;
385 default:
386 pci_dir = PCI_DMA_BIDIRECTIONAL;
387 break;
388 }
389
390 *pci_direction = pci_dir;
391
392 return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
393 buffer, buffer_length, pci_dir);
394}
395
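/*
 * Lock-free I/O request allocation: scan the pool from the (benignly racy)
 * next-slot hint until a slot's refcount can be raised from 0 to 1.
 */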
396static struct pqi_io_request *pqi_alloc_io_request(
397 struct pqi_ctrl_info *ctrl_info)
398{
399 struct pqi_io_request *io_request;
400 u16 i = ctrl_info->next_io_request_slot; /* benignly racy */
401
402 while (1) {
403 io_request = &ctrl_info->io_request_pool[i];
404 if (atomic_inc_return(&io_request->refcount) == 1)
405 break;
406 atomic_dec(&io_request->refcount);
407 i = (i + 1) % ctrl_info->max_io_slots;
408 }
409
410 /* benignly racy */
411 ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;
412
413 io_request->scmd = NULL;
414 io_request->status = 0;
415 io_request->error_info = NULL;
416
417 return io_request;
418}
419
420static void pqi_free_io_request(struct pqi_io_request *io_request)
421{
422 atomic_dec(&io_request->refcount);
423}
424
425static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
426 struct bmic_identify_controller *buffer)
427{
428 int rc;
429 int pci_direction;
430 struct pqi_raid_path_request request;
431
432 rc = pqi_build_raid_path_request(ctrl_info, &request,
433 BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
434 sizeof(*buffer), 0, &pci_direction);
435 if (rc)
436 return rc;
437
438 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
439 NULL, NO_TIMEOUT);
440
441 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
442 pci_direction);
443
444 return rc;
445}
446
447static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
448 u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
449{
450 int rc;
451 int pci_direction;
452 struct pqi_raid_path_request request;
453
454 rc = pqi_build_raid_path_request(ctrl_info, &request,
455 INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
456 &pci_direction);
457 if (rc)
458 return rc;
459
460 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
461 NULL, NO_TIMEOUT);
462
463 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
464 pci_direction);
465
466 return rc;
467}
468
469static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
470 struct pqi_scsi_dev *device,
471 struct bmic_identify_physical_device *buffer,
472 size_t buffer_length)
473{
474 int rc;
475 int pci_direction;
476 u16 bmic_device_index;
477 struct pqi_raid_path_request request;
478
479 rc = pqi_build_raid_path_request(ctrl_info, &request,
480 BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
481 buffer_length, 0, &pci_direction);
482 if (rc)
483 return rc;
484
485 bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
486 request.cdb[2] = (u8)bmic_device_index;
487 request.cdb[9] = (u8)(bmic_device_index >> 8);
488
489 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
490 0, NULL, NO_TIMEOUT);
491
492 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
493 pci_direction);
494
495 return rc;
496}
497
498#define SA_CACHE_FLUSH_BUFFER_LENGTH 4
499
500static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info)
501{
502 int rc;
503 struct pqi_raid_path_request request;
504 int pci_direction;
505 u8 *buffer;
506
507 /*
508 * Don't bother trying to flush the cache if the controller is
509 * locked up.
510 */
511 if (pqi_ctrl_offline(ctrl_info))
512 return -ENXIO;
513
514 buffer = kzalloc(SA_CACHE_FLUSH_BUFFER_LENGTH, GFP_KERNEL);
515 if (!buffer)
516 return -ENOMEM;
517
518 rc = pqi_build_raid_path_request(ctrl_info, &request,
519 SA_CACHE_FLUSH, RAID_CTLR_LUNID, buffer,
520 SA_CACHE_FLUSH_BUFFER_LENGTH, 0, &pci_direction);
521 if (rc)
522 goto out;
523
524 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
525 0, NULL, NO_TIMEOUT);
526
527 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
528 pci_direction);
529
530out:
531 kfree(buffer);
532
533 return rc;
534}
535
536static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
537 void *buffer, size_t buffer_length)
538{
539 int rc;
540 struct pqi_raid_path_request request;
541 int pci_direction;
542
543 rc = pqi_build_raid_path_request(ctrl_info, &request,
544 BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
545 buffer_length, 0, &pci_direction);
546 if (rc)
547 return rc;
548
549 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
550 0, NULL, NO_TIMEOUT);
551
552 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
553 pci_direction);
554
555 return rc;
556}
557
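/* Host wellness buffer: '<HW>' start tag, 'DV' driver-version entry, 'ZZ' end tag. */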
558#pragma pack(1)
559
560struct bmic_host_wellness_driver_version {
561 u8 start_tag[4];
562 u8 driver_version_tag[2];
563 __le16 driver_version_length;
564 char driver_version[32];
565 u8 end_tag[2];
566};
567
568#pragma pack()
569
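/* Report the running driver version to the controller via BMIC host wellness. */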
570static int pqi_write_driver_version_to_host_wellness(
571 struct pqi_ctrl_info *ctrl_info)
572{
573 int rc;
574 struct bmic_host_wellness_driver_version *buffer;
575 size_t buffer_length;
576
577 buffer_length = sizeof(*buffer);
578
579 buffer = kmalloc(buffer_length, GFP_KERNEL);
580 if (!buffer)
581 return -ENOMEM;
582
583 buffer->start_tag[0] = '<';
584 buffer->start_tag[1] = 'H';
585 buffer->start_tag[2] = 'W';
586 buffer->start_tag[3] = '>';
587 buffer->driver_version_tag[0] = 'D';
588 buffer->driver_version_tag[1] = 'V';
589 put_unaligned_le16(sizeof(buffer->driver_version),
590 &buffer->driver_version_length);
591 strncpy(buffer->driver_version, DRIVER_VERSION,
592 sizeof(buffer->driver_version) - 1);
593 buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
594 buffer->end_tag[0] = 'Z';
595 buffer->end_tag[1] = 'Z';
596
597 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
598
599 kfree(buffer);
600
601 return rc;
602}
603
604#pragma pack(1)
605
606struct bmic_host_wellness_time {
607 u8 start_tag[4];
608 u8 time_tag[2];
609 __le16 time_length;
610 u8 time[8];
611 u8 dont_write_tag[2];
612 u8 end_tag[2];
613};
614
615#pragma pack()
616
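/* Send the current local time to the controller, encoded as BCD fields. */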
617static int pqi_write_current_time_to_host_wellness(
618 struct pqi_ctrl_info *ctrl_info)
619{
620 int rc;
621 struct bmic_host_wellness_time *buffer;
622 size_t buffer_length;
623 time64_t local_time;
624 unsigned int year;
625 struct tm tm;
626
627 buffer_length = sizeof(*buffer);
628
629 buffer = kmalloc(buffer_length, GFP_KERNEL);
630 if (!buffer)
631 return -ENOMEM;
632
633 buffer->start_tag[0] = '<';
634 buffer->start_tag[1] = 'H';
635 buffer->start_tag[2] = 'W';
636 buffer->start_tag[3] = '>';
637 buffer->time_tag[0] = 'T';
638 buffer->time_tag[1] = 'D';
639 put_unaligned_le16(sizeof(buffer->time),
640 &buffer->time_length);
641
642 local_time = ktime_get_real_seconds();
643 time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
644 year = tm.tm_year + 1900;
645
646 buffer->time[0] = bin2bcd(tm.tm_hour);
647 buffer->time[1] = bin2bcd(tm.tm_min);
648 buffer->time[2] = bin2bcd(tm.tm_sec);
649 buffer->time[3] = 0;
650 buffer->time[4] = bin2bcd(tm.tm_mon + 1);
651 buffer->time[5] = bin2bcd(tm.tm_mday);
652 buffer->time[6] = bin2bcd(year / 100);
653 buffer->time[7] = bin2bcd(year % 100);
654
655 buffer->dont_write_tag[0] = 'D';
656 buffer->dont_write_tag[1] = 'W';
657 buffer->end_tag[0] = 'Z';
658 buffer->end_tag[1] = 'Z';
659
660 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
661
662 kfree(buffer);
663
664 return rc;
665}
666
667#define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * HZ)
668
669static void pqi_update_time_worker(struct work_struct *work)
670{
671 int rc;
672 struct pqi_ctrl_info *ctrl_info;
673
674 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
675 update_time_work);
676
677 rc = pqi_write_current_time_to_host_wellness(ctrl_info);
678 if (rc)
679 dev_warn(&ctrl_info->pci_dev->dev,
680 "error updating time on controller\n");
681
682 schedule_delayed_work(&ctrl_info->update_time_work,
683 PQI_UPDATE_TIME_WORK_INTERVAL);
684}
685
686static inline void pqi_schedule_update_time_worker(
687 struct pqi_ctrl_info *ctrl_info)
688{
689 schedule_delayed_work(&ctrl_info->update_time_work, 0);
690}
691
692static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
693 void *buffer, size_t buffer_length)
694{
695 int rc;
696 int pci_direction;
697 struct pqi_raid_path_request request;
698
699 rc = pqi_build_raid_path_request(ctrl_info, &request,
700 cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction);
701 if (rc)
702 return rc;
703
704 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
705 NULL, NO_TIMEOUT);
706
707 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
708 pci_direction);
709
710 return rc;
711}
712
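/*
 * Issue the CISS report LUNs command twice: once to learn the list length,
 * then again for the full list; retry if the list grew between the two calls.
 */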
713static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
714 void **buffer)
715{
716 int rc;
717 size_t lun_list_length;
718 size_t lun_data_length;
719 size_t new_lun_list_length;
720 void *lun_data = NULL;
721 struct report_lun_header *report_lun_header;
722
723 report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
724 if (!report_lun_header) {
725 rc = -ENOMEM;
726 goto out;
727 }
728
729 rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
730 sizeof(*report_lun_header));
731 if (rc)
732 goto out;
733
734 lun_list_length = get_unaligned_be32(&report_lun_header->list_length);
735
736again:
737 lun_data_length = sizeof(struct report_lun_header) + lun_list_length;
738
739 lun_data = kmalloc(lun_data_length, GFP_KERNEL);
740 if (!lun_data) {
741 rc = -ENOMEM;
742 goto out;
743 }
744
745 if (lun_list_length == 0) {
746 memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
747 goto out;
748 }
749
750 rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
751 if (rc)
752 goto out;
753
754 new_lun_list_length = get_unaligned_be32(
755 &((struct report_lun_header *)lun_data)->list_length);
756
757 if (new_lun_list_length > lun_list_length) {
758 lun_list_length = new_lun_list_length;
759 kfree(lun_data);
760 goto again;
761 }
762
763out:
764 kfree(report_lun_header);
765
766 if (rc) {
767 kfree(lun_data);
768 lun_data = NULL;
769 }
770
771 *buffer = lun_data;
772
773 return rc;
774}
775
776static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
777 void **buffer)
778{
779 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
780 buffer);
781}
782
783static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
784 void **buffer)
785{
786 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
787}
788
789static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
790 struct report_phys_lun_extended **physdev_list,
791 struct report_log_lun_extended **logdev_list)
792{
793 int rc;
794 size_t logdev_list_length;
795 size_t logdev_data_length;
796 struct report_log_lun_extended *internal_logdev_list;
797 struct report_log_lun_extended *logdev_data;
798 struct report_lun_header report_lun_header;
799
800 rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
801 if (rc)
802 dev_err(&ctrl_info->pci_dev->dev,
803 "report physical LUNs failed\n");
804
805 rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
806 if (rc)
807 dev_err(&ctrl_info->pci_dev->dev,
808 "report logical LUNs failed\n");
809
810 /*
811 * Tack the controller itself onto the end of the logical device list.
812 */
813
814 logdev_data = *logdev_list;
815
816 if (logdev_data) {
817 logdev_list_length =
818 get_unaligned_be32(&logdev_data->header.list_length);
819 } else {
820 memset(&report_lun_header, 0, sizeof(report_lun_header));
821 logdev_data =
822 (struct report_log_lun_extended *)&report_lun_header;
823 logdev_list_length = 0;
824 }
825
826 logdev_data_length = sizeof(struct report_lun_header) +
827 logdev_list_length;
828
829 internal_logdev_list = kmalloc(logdev_data_length +
830 sizeof(struct report_log_lun_extended), GFP_KERNEL);
831 if (!internal_logdev_list) {
832 kfree(*logdev_list);
833 *logdev_list = NULL;
834 return -ENOMEM;
835 }
836
837 memcpy(internal_logdev_list, logdev_data, logdev_data_length);
838 memset((u8 *)internal_logdev_list + logdev_data_length, 0,
839 sizeof(struct report_log_lun_extended_entry));
840 put_unaligned_be32(logdev_list_length +
841 sizeof(struct report_log_lun_extended_entry),
842 &internal_logdev_list->header.list_length);
843
844 kfree(*logdev_list);
845 *logdev_list = internal_logdev_list;
846
847 return 0;
848}
849
850static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
851 int bus, int target, int lun)
852{
853 device->bus = bus;
854 device->target = target;
855 device->lun = lun;
856}
857
858static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
859{
860 u8 *scsi3addr;
861 u32 lunid;
862
863 scsi3addr = device->scsi3addr;
864 lunid = get_unaligned_le32(scsi3addr);
865
866 if (pqi_is_hba_lunid(scsi3addr)) {
867 /* The specified device is the controller. */
868 pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
869 device->target_lun_valid = true;
870 return;
871 }
872
873 if (pqi_is_logical_device(device)) {
874 pqi_set_bus_target_lun(device, PQI_RAID_VOLUME_BUS, 0,
875 lunid & 0x3fff);
876 device->target_lun_valid = true;
877 return;
878 }
879
880 /*
881 * Defer target and LUN assignment for non-controller physical devices
882 * because the SAS transport layer will make these assignments later.
883 */
884 pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
885}
886
887static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
888 struct pqi_scsi_dev *device)
889{
890 int rc;
891 u8 raid_level;
892 u8 *buffer;
893
894 raid_level = SA_RAID_UNKNOWN;
895
896 buffer = kmalloc(64, GFP_KERNEL);
897 if (buffer) {
898 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
899 VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
900 if (rc == 0) {
901 raid_level = buffer[8];
902 if (raid_level > SA_RAID_MAX)
903 raid_level = SA_RAID_UNKNOWN;
904 }
905 kfree(buffer);
906 }
907
908 device->raid_level = raid_level;
909}
910
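/* Sanity-check the size and layout of a RAID map returned by the controller. */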
911static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
912 struct pqi_scsi_dev *device, struct raid_map *raid_map)
913{
914 char *err_msg;
915 u32 raid_map_size;
916 u32 r5or6_blocks_per_row;
917 unsigned int num_phys_disks;
918 unsigned int num_raid_map_entries;
919
920 raid_map_size = get_unaligned_le32(&raid_map->structure_size);
921
922 if (raid_map_size < offsetof(struct raid_map, disk_data)) {
923 err_msg = "RAID map too small";
924 goto bad_raid_map;
925 }
926
927 if (raid_map_size > sizeof(*raid_map)) {
928 err_msg = "RAID map too large";
929 goto bad_raid_map;
930 }
931
932 num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
933 (get_unaligned_le16(&raid_map->data_disks_per_row) +
934 get_unaligned_le16(&raid_map->metadata_disks_per_row));
935 num_raid_map_entries = num_phys_disks *
936 get_unaligned_le16(&raid_map->row_cnt);
937
938 if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) {
939 err_msg = "invalid number of map entries in RAID map";
940 goto bad_raid_map;
941 }
942
943 if (device->raid_level == SA_RAID_1) {
944 if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
945 err_msg = "invalid RAID-1 map";
946 goto bad_raid_map;
947 }
948 } else if (device->raid_level == SA_RAID_ADM) {
949 if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
950 err_msg = "invalid RAID-1(ADM) map";
951 goto bad_raid_map;
952 }
953 } else if ((device->raid_level == SA_RAID_5 ||
954 device->raid_level == SA_RAID_6) &&
955 get_unaligned_le16(&raid_map->layout_map_count) > 1) {
956 /* RAID 50/60 */
957 r5or6_blocks_per_row =
958 get_unaligned_le16(&raid_map->strip_size) *
959 get_unaligned_le16(&raid_map->data_disks_per_row);
960 if (r5or6_blocks_per_row == 0) {
961 err_msg = "invalid RAID-5 or RAID-6 map";
962 goto bad_raid_map;
963 }
964 }
965
966 return 0;
967
968bad_raid_map:
969 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", err_msg);
970
971 return -EINVAL;
972}
973
974static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
975 struct pqi_scsi_dev *device)
976{
977 int rc;
978 int pci_direction;
979 struct pqi_raid_path_request request;
980 struct raid_map *raid_map;
981
982 raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
983 if (!raid_map)
984 return -ENOMEM;
985
986 rc = pqi_build_raid_path_request(ctrl_info, &request,
987 CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
988 sizeof(*raid_map), 0, &pci_direction);
989 if (rc)
990 goto error;
991
992 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
993 NULL, NO_TIMEOUT);
994
995 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
996 pci_direction);
997
998 if (rc)
999 goto error;
1000
1001 rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
1002 if (rc)
1003 goto error;
1004
1005 device->raid_map = raid_map;
1006
1007 return 0;
1008
1009error:
1010 kfree(raid_map);
1011
1012 return rc;
1013}
1014
1015static void pqi_get_offload_status(struct pqi_ctrl_info *ctrl_info,
1016 struct pqi_scsi_dev *device)
1017{
1018 int rc;
1019 u8 *buffer;
1020 u8 offload_status;
1021
1022 buffer = kmalloc(64, GFP_KERNEL);
1023 if (!buffer)
1024 return;
1025
1026 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1027 VPD_PAGE | CISS_VPD_LV_OFFLOAD_STATUS, buffer, 64);
1028 if (rc)
1029 goto out;
1030
1031#define OFFLOAD_STATUS_BYTE 4
1032#define OFFLOAD_CONFIGURED_BIT 0x1
1033#define OFFLOAD_ENABLED_BIT 0x2
1034
1035 offload_status = buffer[OFFLOAD_STATUS_BYTE];
1036 device->offload_configured =
1037 !!(offload_status & OFFLOAD_CONFIGURED_BIT);
1038 if (device->offload_configured) {
1039 device->offload_enabled_pending =
1040 !!(offload_status & OFFLOAD_ENABLED_BIT);
1041 if (pqi_get_raid_map(ctrl_info, device))
1042 device->offload_enabled_pending = false;
1043 }
1044
1045out:
1046 kfree(buffer);
1047}
1048
1049/*
1050 * Use vendor-specific VPD to determine online/offline status of a volume.
1051 */
1052
1053static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
1054 struct pqi_scsi_dev *device)
1055{
1056 int rc;
1057 size_t page_length;
1058 u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
1059 bool volume_offline = true;
1060 u32 volume_flags;
1061 struct ciss_vpd_logical_volume_status *vpd;
1062
1063 vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
1064 if (!vpd)
1065 goto no_buffer;
1066
1067 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1068 VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
1069 if (rc)
1070 goto out;
1071
1072 page_length = offsetof(struct ciss_vpd_logical_volume_status,
1073 volume_status) + vpd->page_length;
1074 if (page_length < sizeof(*vpd))
1075 goto out;
1076
1077 volume_status = vpd->volume_status;
1078 volume_flags = get_unaligned_be32(&vpd->flags);
1079 volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;
1080
1081out:
1082 kfree(vpd);
1083no_buffer:
1084 device->volume_status = volume_status;
1085 device->volume_offline = volume_offline;
1086}
1087
1088static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
1089 struct pqi_scsi_dev *device)
1090{
1091 int rc;
1092 u8 *buffer;
1093
1094 buffer = kmalloc(64, GFP_KERNEL);
1095 if (!buffer)
1096 return -ENOMEM;
1097
1098 /* Send an inquiry to the device to see what it is. */
1099 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
1100 if (rc)
1101 goto out;
1102
1103 scsi_sanitize_inquiry_string(&buffer[8], 8);
1104 scsi_sanitize_inquiry_string(&buffer[16], 16);
1105
1106 device->devtype = buffer[0] & 0x1f;
1107 memcpy(device->vendor, &buffer[8],
1108 sizeof(device->vendor));
1109 memcpy(device->model, &buffer[16],
1110 sizeof(device->model));
1111
1112 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
1113 pqi_get_raid_level(ctrl_info, device);
1114 pqi_get_offload_status(ctrl_info, device);
1115 pqi_get_volume_status(ctrl_info, device);
1116 }
1117
1118out:
1119 kfree(buffer);
1120
1121 return rc;
1122}
1123
1124static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
1125 struct pqi_scsi_dev *device,
1126 struct bmic_identify_physical_device *id_phys)
1127{
1128 int rc;
1129
1130 memset(id_phys, 0, sizeof(*id_phys));
1131
1132 rc = pqi_identify_physical_device(ctrl_info, device,
1133 id_phys, sizeof(*id_phys));
1134 if (rc) {
1135 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1136 return;
1137 }
1138
1139 device->queue_depth =
1140 get_unaligned_le16(&id_phys->current_queue_depth_limit);
1141 device->device_type = id_phys->device_type;
1142 device->active_path_index = id_phys->active_path_number;
1143 device->path_map = id_phys->redundant_path_present_map;
1144 memcpy(&device->box,
1145 &id_phys->alternate_paths_phys_box_on_port,
1146 sizeof(device->box));
1147 memcpy(&device->phys_connector,
1148 &id_phys->alternate_paths_phys_connector,
1149 sizeof(device->phys_connector));
1150 device->bay = id_phys->phys_bay_in_box;
1151}
1152
1153static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
1154 struct pqi_scsi_dev *device)
1155{
1156 char *status;
1157 static const char unknown_state_str[] =
1158 "Volume is in an unknown state (%u)";
1159 char unknown_state_buffer[sizeof(unknown_state_str) + 10];
1160
1161 switch (device->volume_status) {
1162 case CISS_LV_OK:
1163 status = "Volume online";
1164 break;
1165 case CISS_LV_FAILED:
1166 status = "Volume failed";
1167 break;
1168 case CISS_LV_NOT_CONFIGURED:
1169 status = "Volume not configured";
1170 break;
1171 case CISS_LV_DEGRADED:
1172 status = "Volume degraded";
1173 break;
1174 case CISS_LV_READY_FOR_RECOVERY:
1175 status = "Volume ready for recovery operation";
1176 break;
1177 case CISS_LV_UNDERGOING_RECOVERY:
1178 status = "Volume undergoing recovery";
1179 break;
1180 case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
1181 status = "Wrong physical drive was replaced";
1182 break;
1183 case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
1184 status = "A physical drive not properly connected";
1185 break;
1186 case CISS_LV_HARDWARE_OVERHEATING:
1187 status = "Hardware is overheating";
1188 break;
1189 case CISS_LV_HARDWARE_HAS_OVERHEATED:
1190 status = "Hardware has overheated";
1191 break;
1192 case CISS_LV_UNDERGOING_EXPANSION:
1193 status = "Volume undergoing expansion";
1194 break;
1195 case CISS_LV_NOT_AVAILABLE:
1196 status = "Volume waiting for transforming volume";
1197 break;
1198 case CISS_LV_QUEUED_FOR_EXPANSION:
1199 status = "Volume queued for expansion";
1200 break;
1201 case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
1202 status = "Volume disabled due to SCSI ID conflict";
1203 break;
1204 case CISS_LV_EJECTED:
1205 status = "Volume has been ejected";
1206 break;
1207 case CISS_LV_UNDERGOING_ERASE:
1208 status = "Volume undergoing background erase";
1209 break;
1210 case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
1211 status = "Volume ready for predictive spare rebuild";
1212 break;
1213 case CISS_LV_UNDERGOING_RPI:
1214 status = "Volume undergoing rapid parity initialization";
1215 break;
1216 case CISS_LV_PENDING_RPI:
1217 status = "Volume queued for rapid parity initialization";
1218 break;
1219 case CISS_LV_ENCRYPTED_NO_KEY:
1220 status = "Encrypted volume inaccessible - key not present";
1221 break;
1222 case CISS_LV_UNDERGOING_ENCRYPTION:
1223 status = "Volume undergoing encryption process";
1224 break;
1225 case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
1226 status = "Volume undergoing encryption re-keying process";
1227 break;
1228 case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1229 status =
1230 "Encrypted volume inaccessible - disabled on ctrl";
1231 break;
1232 case CISS_LV_PENDING_ENCRYPTION:
1233 status = "Volume pending migration to encrypted state";
1234 break;
1235 case CISS_LV_PENDING_ENCRYPTION_REKEYING:
1236 status = "Volume pending encryption rekeying";
1237 break;
1238 case CISS_LV_NOT_SUPPORTED:
1239 status = "Volume not supported on this controller";
1240 break;
1241 case CISS_LV_STATUS_UNAVAILABLE:
1242 status = "Volume status not available";
1243 break;
1244 default:
1245 snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
1246 unknown_state_str, device->volume_status);
1247 status = unknown_state_buffer;
1248 break;
1249 }
1250
1251 dev_info(&ctrl_info->pci_dev->dev,
1252 "scsi %d:%d:%d:%d %s\n",
1253 ctrl_info->scsi_host->host_no,
1254 device->bus, device->target, device->lun, status);
1255}
1256
1257static struct pqi_scsi_dev *pqi_find_disk_by_aio_handle(
1258 struct pqi_ctrl_info *ctrl_info, u32 aio_handle)
1259{
1260 struct pqi_scsi_dev *device;
1261
1262 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1263 scsi_device_list_entry) {
1264 if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
1265 continue;
1266 if (pqi_is_logical_device(device))
1267 continue;
1268 if (device->aio_handle == aio_handle)
1269 return device;
1270 }
1271
1272 return NULL;
1273}
1274
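/*
 * Set a logical drive's queue depth to the sum of the queue depths of the
 * physical disks referenced by its RAID map.
 */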
1275static void pqi_update_logical_drive_queue_depth(
1276 struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *logical_drive)
1277{
1278 unsigned int i;
1279 struct raid_map *raid_map;
1280 struct raid_map_disk_data *disk_data;
1281 struct pqi_scsi_dev *phys_disk;
1282 unsigned int num_phys_disks;
1283 unsigned int num_raid_map_entries;
1284 unsigned int queue_depth;
1285
1286 logical_drive->queue_depth = PQI_LOGICAL_DRIVE_DEFAULT_MAX_QUEUE_DEPTH;
1287
1288 raid_map = logical_drive->raid_map;
1289 if (!raid_map)
1290 return;
1291
1292 disk_data = raid_map->disk_data;
1293 num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
1294 (get_unaligned_le16(&raid_map->data_disks_per_row) +
1295 get_unaligned_le16(&raid_map->metadata_disks_per_row));
1296 num_raid_map_entries = num_phys_disks *
1297 get_unaligned_le16(&raid_map->row_cnt);
1298
1299 queue_depth = 0;
1300 for (i = 0; i < num_raid_map_entries; i++) {
1301 phys_disk = pqi_find_disk_by_aio_handle(ctrl_info,
1302 disk_data[i].aio_handle);
1303
1304 if (!phys_disk) {
1305 dev_warn(&ctrl_info->pci_dev->dev,
1306 "failed to find physical disk for logical drive %016llx\n",
1307 get_unaligned_be64(logical_drive->scsi3addr));
1308 logical_drive->offload_enabled = false;
1309 logical_drive->offload_enabled_pending = false;
1310 kfree(raid_map);
1311 logical_drive->raid_map = NULL;
1312 return;
1313 }
1314
1315 queue_depth += phys_disk->queue_depth;
1316 }
1317
1318 logical_drive->queue_depth = queue_depth;
1319}
1320
1321static void pqi_update_all_logical_drive_queue_depths(
1322 struct pqi_ctrl_info *ctrl_info)
1323{
1324 struct pqi_scsi_dev *device;
1325
1326 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1327 scsi_device_list_entry) {
1328 if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
1329 continue;
1330 if (!pqi_is_logical_device(device))
1331 continue;
1332 pqi_update_logical_drive_queue_depth(ctrl_info, device);
1333 }
1334}
1335
1336static void pqi_rescan_worker(struct work_struct *work)
1337{
1338 struct pqi_ctrl_info *ctrl_info;
1339
1340 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1341 rescan_work);
1342
1343 pqi_scan_scsi_devices(ctrl_info);
1344}
1345
1346static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
1347 struct pqi_scsi_dev *device)
1348{
1349 int rc;
1350
1351 if (pqi_is_logical_device(device))
1352 rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
1353 device->target, device->lun);
1354 else
1355 rc = pqi_add_sas_device(ctrl_info->sas_host, device);
1356
1357 return rc;
1358}
1359
1360static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
1361 struct pqi_scsi_dev *device)
1362{
1363 if (pqi_is_logical_device(device))
1364 scsi_remove_device(device->sdev);
1365 else
1366 pqi_remove_sas_device(device);
1367}
1368
1369/* Assumes the SCSI device list lock is held. */
1370
1371static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
1372 int bus, int target, int lun)
1373{
1374 struct pqi_scsi_dev *device;
1375
1376 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1377 scsi_device_list_entry)
1378 if (device->bus == bus && device->target == target &&
1379 device->lun == lun)
1380 return device;
1381
1382 return NULL;
1383}
1384
1385static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
1386 struct pqi_scsi_dev *dev2)
1387{
1388 if (dev1->is_physical_device != dev2->is_physical_device)
1389 return false;
1390
1391 if (dev1->is_physical_device)
1392 return dev1->wwid == dev2->wwid;
1393
1394 return memcmp(dev1->volume_id, dev2->volume_id,
1395 sizeof(dev1->volume_id)) == 0;
1396}
1397
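/* Result of matching a newly reported device against the existing device list. */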
1398enum pqi_find_result {
1399 DEVICE_NOT_FOUND,
1400 DEVICE_CHANGED,
1401 DEVICE_SAME,
1402};
1403
1404static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
1405 struct pqi_scsi_dev *device_to_find,
1406 struct pqi_scsi_dev **matching_device)
1407{
1408 struct pqi_scsi_dev *device;
1409
1410 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1411 scsi_device_list_entry) {
1412 if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
1413 device->scsi3addr)) {
1414 *matching_device = device;
1415 if (pqi_device_equal(device_to_find, device)) {
1416 if (device_to_find->volume_offline)
1417 return DEVICE_CHANGED;
1418 return DEVICE_SAME;
1419 }
1420 return DEVICE_CHANGED;
1421 }
1422 }
1423
1424 return DEVICE_NOT_FOUND;
1425}
1426
1427static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
1428 char *action, struct pqi_scsi_dev *device)
1429{
1430 dev_info(&ctrl_info->pci_dev->dev,
1431 "%s scsi %d:%d:%d:%d: %s %.8s %.16s %-12s SSDSmartPathCap%c En%c Exp%c qd=%d\n",
1432 action,
1433 ctrl_info->scsi_host->host_no,
1434 device->bus,
1435 device->target,
1436 device->lun,
1437 scsi_device_type(device->devtype),
1438 device->vendor,
1439 device->model,
1440 pqi_raid_level_to_string(device->raid_level),
1441 device->offload_configured ? '+' : '-',
1442 device->offload_enabled_pending ? '+' : '-',
1443 device->expose_device ? '+' : '-',
1444 device->queue_depth);
1445}
1446
1447/* Assumes the SCSI device list lock is held. */
1448
1449static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
1450 struct pqi_scsi_dev *new_device)
1451{
1452 existing_device->devtype = new_device->devtype;
1453 existing_device->device_type = new_device->device_type;
1454 existing_device->bus = new_device->bus;
1455 if (new_device->target_lun_valid) {
1456 existing_device->target = new_device->target;
1457 existing_device->lun = new_device->lun;
1458 existing_device->target_lun_valid = true;
1459 }
1460
1461 /* By definition, the scsi3addr and wwid fields are already the same. */
1462
1463 existing_device->is_physical_device = new_device->is_physical_device;
1464 existing_device->expose_device = new_device->expose_device;
1465 existing_device->no_uld_attach = new_device->no_uld_attach;
1466 existing_device->aio_enabled = new_device->aio_enabled;
1467 memcpy(existing_device->vendor, new_device->vendor,
1468 sizeof(existing_device->vendor));
1469 memcpy(existing_device->model, new_device->model,
1470 sizeof(existing_device->model));
1471 existing_device->sas_address = new_device->sas_address;
1472 existing_device->raid_level = new_device->raid_level;
1473 existing_device->queue_depth = new_device->queue_depth;
1474 existing_device->aio_handle = new_device->aio_handle;
1475 existing_device->volume_status = new_device->volume_status;
1476 existing_device->active_path_index = new_device->active_path_index;
1477 existing_device->path_map = new_device->path_map;
1478 existing_device->bay = new_device->bay;
1479 memcpy(existing_device->box, new_device->box,
1480 sizeof(existing_device->box));
1481 memcpy(existing_device->phys_connector, new_device->phys_connector,
1482 sizeof(existing_device->phys_connector));
1483 existing_device->offload_configured = new_device->offload_configured;
1484 existing_device->offload_enabled = false;
1485 existing_device->offload_enabled_pending =
1486 new_device->offload_enabled_pending;
1487 existing_device->offload_to_mirror = 0;
1488 kfree(existing_device->raid_map);
1489 existing_device->raid_map = new_device->raid_map;
1490
1491 /* To prevent this from being freed later. */
1492 new_device->raid_map = NULL;
1493}
1494
1495static inline void pqi_free_device(struct pqi_scsi_dev *device)
1496{
1497 if (device) {
1498 kfree(device->raid_map);
1499 kfree(device);
1500 }
1501}
1502
1503/*
1504 * Called when exposing a new device to the OS fails in order to re-adjust
1505 * our internal SCSI device list to match the SCSI ML's view.
1506 */
1507
1508static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
1509 struct pqi_scsi_dev *device)
1510{
1511 unsigned long flags;
1512
1513 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1514 list_del(&device->scsi_device_list_entry);
1515 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1516
1517 /* Allow the device structure to be freed later. */
1518 device->keep_device = false;
1519}
1520
1521static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
1522 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
1523{
1524 int rc;
1525 unsigned int i;
1526 unsigned long flags;
1527 enum pqi_find_result find_result;
1528 struct pqi_scsi_dev *device;
1529 struct pqi_scsi_dev *next;
1530 struct pqi_scsi_dev *matching_device;
1531 struct list_head add_list;
1532 struct list_head delete_list;
1533
1534 INIT_LIST_HEAD(&add_list);
1535 INIT_LIST_HEAD(&delete_list);
1536
1537 /*
1538 * The idea here is to do as little work as possible while holding the
1539 * spinlock. That's why we go to great pains to defer anything other
1540 * than updating the internal device list until after we release the
1541 * spinlock.
1542 */
1543
1544 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1545
1546 /* Assume that all devices in the existing list have gone away. */
1547 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1548 scsi_device_list_entry)
1549 device->device_gone = true;
1550
1551 for (i = 0; i < num_new_devices; i++) {
1552 device = new_device_list[i];
1553
1554 find_result = pqi_scsi_find_entry(ctrl_info, device,
1555 &matching_device);
1556
1557 switch (find_result) {
1558 case DEVICE_SAME:
1559 /*
1560 * The newly found device is already in the existing
1561 * device list.
1562 */
1563 device->new_device = false;
1564 matching_device->device_gone = false;
1565 pqi_scsi_update_device(matching_device, device);
1566 break;
1567 case DEVICE_NOT_FOUND:
1568 /*
1569 * The newly found device is NOT in the existing device
1570 * list.
1571 */
1572 device->new_device = true;
1573 break;
1574 case DEVICE_CHANGED:
1575 /*
1576 * The original device has gone away and we need to add
1577 * the new device.
1578 */
1579 device->new_device = true;
1580 break;
1581 default:
1582 WARN_ON(find_result);
1583 break;
1584 }
1585 }
1586
1587 /* Process all devices that have gone away. */
1588 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
1589 scsi_device_list_entry) {
1590 if (device->device_gone) {
1591 list_del(&device->scsi_device_list_entry);
1592 list_add_tail(&device->delete_list_entry, &delete_list);
1593 }
1594 }
1595
1596 /* Process all new devices. */
1597 for (i = 0; i < num_new_devices; i++) {
1598 device = new_device_list[i];
1599 if (!device->new_device)
1600 continue;
1601 if (device->volume_offline)
1602 continue;
1603 list_add_tail(&device->scsi_device_list_entry,
1604 &ctrl_info->scsi_device_list);
1605 list_add_tail(&device->add_list_entry, &add_list);
1606 /* To prevent this device structure from being freed later. */
1607 device->keep_device = true;
1608 }
1609
1610 pqi_update_all_logical_drive_queue_depths(ctrl_info);
1611
1612 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1613 scsi_device_list_entry)
1614 device->offload_enabled =
1615 device->offload_enabled_pending;
1616
1617 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1618
1619 /* Remove all devices that have gone away. */
1620 list_for_each_entry_safe(device, next, &delete_list,
1621 delete_list_entry) {
1622 if (device->sdev)
1623 pqi_remove_device(ctrl_info, device);
1624 if (device->volume_offline) {
1625 pqi_dev_info(ctrl_info, "offline", device);
1626 pqi_show_volume_status(ctrl_info, device);
1627 } else {
1628 pqi_dev_info(ctrl_info, "removed", device);
1629 }
1630 list_del(&device->delete_list_entry);
1631 pqi_free_device(device);
1632 }
1633
1634 /*
1635 * Notify the SCSI ML if the queue depth of any existing device has
1636 * changed.
1637 */
1638 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1639 scsi_device_list_entry) {
1640 if (device->sdev && device->queue_depth !=
1641 device->advertised_queue_depth) {
1642 device->advertised_queue_depth = device->queue_depth;
1643 scsi_change_queue_depth(device->sdev,
1644 device->advertised_queue_depth);
1645 }
1646 }
1647
1648 /* Expose any new devices. */
1649 list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
1650 if (device->expose_device && !device->sdev) {
1651 rc = pqi_add_device(ctrl_info, device);
1652 if (rc) {
1653 dev_warn(&ctrl_info->pci_dev->dev,
1654 "scsi %d:%d:%d:%d addition failed, device not added\n",
1655 ctrl_info->scsi_host->host_no,
1656 device->bus, device->target,
1657 device->lun);
1658 pqi_fixup_botched_add(ctrl_info, device);
1659 continue;
1660 }
1661 }
1662 pqi_dev_info(ctrl_info, "added", device);
1663 }
1664}
1665
1666static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
1667{
1668 bool is_supported = false;
1669
1670 switch (device->devtype) {
1671 case TYPE_DISK:
1672 case TYPE_ZBC:
1673 case TYPE_TAPE:
1674 case TYPE_MEDIUM_CHANGER:
1675 case TYPE_ENCLOSURE:
1676 is_supported = true;
1677 break;
1678 case TYPE_RAID:
1679 /*
1680 * Only support the HBA controller itself as a RAID
1681 * controller. If it's a RAID controller other than
1682 * the HBA itself (an external RAID controller, MSA500
1683 * or similar), we don't support it.
1684 */
1685 if (pqi_is_hba_lunid(device->scsi3addr))
1686 is_supported = true;
1687 break;
1688 }
1689
1690 return is_supported;
1691}
1692
1693static inline bool pqi_skip_device(u8 *scsi3addr,
1694 struct report_phys_lun_extended_entry *phys_lun_ext_entry)
1695{
1696 u8 device_flags;
1697
1698 if (!MASKED_DEVICE(scsi3addr))
1699 return false;
1700
1701 /* The device is masked. */
1702
1703 device_flags = phys_lun_ext_entry->device_flags;
1704
1705 if (device_flags & REPORT_PHYS_LUN_DEV_FLAG_NON_DISK) {
1706 /*
1707 * It's a non-disk device. We ignore all devices of this type
1708 * when they're masked.
1709 */
1710 return true;
1711 }
1712
1713 return false;
1714}
1715
1716static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
1717{
1718 /* Expose all devices except for physical devices that are masked. */
1719 if (device->is_physical_device && MASKED_DEVICE(device->scsi3addr))
1720 return false;
1721
1722 return true;
1723}
1724
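/*
 * Build the list of physical and logical devices currently reported by the
 * controller and reconcile it with the driver's existing device list.
 */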
1725static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1726{
1727 int i;
1728 int rc;
1729 struct list_head new_device_list_head;
1730 struct report_phys_lun_extended *physdev_list = NULL;
1731 struct report_log_lun_extended *logdev_list = NULL;
1732 struct report_phys_lun_extended_entry *phys_lun_ext_entry;
1733 struct report_log_lun_extended_entry *log_lun_ext_entry;
1734 struct bmic_identify_physical_device *id_phys = NULL;
1735 u32 num_physicals;
1736 u32 num_logicals;
1737 struct pqi_scsi_dev **new_device_list = NULL;
1738 struct pqi_scsi_dev *device;
1739 struct pqi_scsi_dev *next;
1740 unsigned int num_new_devices;
1741 unsigned int num_valid_devices;
1742 bool is_physical_device;
1743 u8 *scsi3addr;
1744 static char *out_of_memory_msg =
1745 "out of memory, device discovery stopped";
1746
1747 INIT_LIST_HEAD(&new_device_list_head);
1748
1749 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
1750 if (rc)
1751 goto out;
1752
1753 if (physdev_list)
1754 num_physicals =
1755 get_unaligned_be32(&physdev_list->header.list_length)
1756 / sizeof(physdev_list->lun_entries[0]);
1757 else
1758 num_physicals = 0;
1759
1760 if (logdev_list)
1761 num_logicals =
1762 get_unaligned_be32(&logdev_list->header.list_length)
1763 / sizeof(logdev_list->lun_entries[0]);
1764 else
1765 num_logicals = 0;
1766
1767 if (num_physicals) {
1768 /*
1769 * We need this buffer for calls to pqi_get_physical_disk_info()
1770 * below. We allocate it here instead of inside
1771 * pqi_get_physical_disk_info() because it's a fairly large
1772 * buffer.
1773 */
1774 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
1775 if (!id_phys) {
1776 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1777 out_of_memory_msg);
1778 rc = -ENOMEM;
1779 goto out;
1780 }
1781 }
1782
1783 num_new_devices = num_physicals + num_logicals;
1784
1785 new_device_list = kmalloc(sizeof(*new_device_list) *
1786 num_new_devices, GFP_KERNEL);
1787 if (!new_device_list) {
1788 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
1789 rc = -ENOMEM;
1790 goto out;
1791 }
1792
1793 for (i = 0; i < num_new_devices; i++) {
1794 device = kzalloc(sizeof(*device), GFP_KERNEL);
1795 if (!device) {
1796 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1797 out_of_memory_msg);
1798 rc = -ENOMEM;
1799 goto out;
1800 }
1801 list_add_tail(&device->new_device_list_entry,
1802 &new_device_list_head);
1803 }
1804
1805 device = NULL;
1806 num_valid_devices = 0;
1807
1808 for (i = 0; i < num_new_devices; i++) {
1809
1810 if (i < num_physicals) {
1811 is_physical_device = true;
1812 phys_lun_ext_entry = &physdev_list->lun_entries[i];
1813 log_lun_ext_entry = NULL;
1814 scsi3addr = phys_lun_ext_entry->lunid;
1815 } else {
1816 is_physical_device = false;
1817 phys_lun_ext_entry = NULL;
1818 log_lun_ext_entry =
1819 &logdev_list->lun_entries[i - num_physicals];
1820 scsi3addr = log_lun_ext_entry->lunid;
1821 }
1822
1823 if (is_physical_device &&
1824 pqi_skip_device(scsi3addr, phys_lun_ext_entry))
1825 continue;
1826
1827 if (device)
1828 device = list_next_entry(device, new_device_list_entry);
1829 else
1830 device = list_first_entry(&new_device_list_head,
1831 struct pqi_scsi_dev, new_device_list_entry);
1832
1833 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1834 device->is_physical_device = is_physical_device;
1835 device->raid_level = SA_RAID_UNKNOWN;
1836
1837 /* Gather information about the device. */
1838 rc = pqi_get_device_info(ctrl_info, device);
1839 if (rc == -ENOMEM) {
1840 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1841 out_of_memory_msg);
1842 goto out;
1843 }
1844 if (rc) {
1845 dev_warn(&ctrl_info->pci_dev->dev,
1846 "obtaining device info failed, skipping device %016llx\n",
1847 get_unaligned_be64(device->scsi3addr));
1848 rc = 0;
1849 continue;
1850 }
1851
1852 if (!pqi_is_supported_device(device))
1853 continue;
1854
1855 pqi_assign_bus_target_lun(device);
1856
1857 device->expose_device = pqi_expose_device(device);
1858
1859 if (device->is_physical_device) {
1860 device->wwid = phys_lun_ext_entry->wwid;
1861 if ((phys_lun_ext_entry->device_flags &
1862 REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
1863 phys_lun_ext_entry->aio_handle)
1864 device->aio_enabled = true;
1865 } else {
1866 memcpy(device->volume_id, log_lun_ext_entry->volume_id,
1867 sizeof(device->volume_id));
1868 }
1869
1870 switch (device->devtype) {
1871 case TYPE_DISK:
1872 case TYPE_ZBC:
1873 case TYPE_ENCLOSURE:
1874 if (device->is_physical_device) {
1875 device->sas_address =
1876 get_unaligned_be64(&device->wwid);
1877 if (device->devtype == TYPE_DISK ||
1878 device->devtype == TYPE_ZBC) {
1879 device->aio_handle =
1880 phys_lun_ext_entry->aio_handle;
1881 pqi_get_physical_disk_info(ctrl_info,
1882 device, id_phys);
1883 }
1884 }
1885 break;
1886 }
1887
1888 new_device_list[num_valid_devices++] = device;
1889 }
1890
1891 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
1892
1893out:
1894 list_for_each_entry_safe(device, next, &new_device_list_head,
1895 new_device_list_entry) {
1896 if (device->keep_device)
1897 continue;
1898 list_del(&device->new_device_list_entry);
1899 pqi_free_device(device);
1900 }
1901
1902 kfree(new_device_list);
1903 kfree(physdev_list);
1904 kfree(logdev_list);
1905 kfree(id_phys);
1906
1907 return rc;
1908}
1909
1910static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1911{
1912 unsigned long flags;
1913 struct pqi_scsi_dev *device;
Kevin Barnett6c223762016-06-27 16:41:00 -05001914
Kevin Barnetta37ef742017-05-03 18:52:22 -05001915 while (1) {
1916 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
Kevin Barnett6c223762016-06-27 16:41:00 -05001917
Kevin Barnetta37ef742017-05-03 18:52:22 -05001918 device = list_first_entry_or_null(&ctrl_info->scsi_device_list,
1919 struct pqi_scsi_dev, scsi_device_list_entry);
1920 if (device)
1921 list_del(&device->scsi_device_list_entry);
1922
1923 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
1924 flags);
1925
1926 if (!device)
1927 break;
1928
Kevin Barnett6c223762016-06-27 16:41:00 -05001929 if (device->sdev)
1930 pqi_remove_device(ctrl_info, device);
Kevin Barnett6c223762016-06-27 16:41:00 -05001931 pqi_free_device(device);
1932 }
Kevin Barnett6c223762016-06-27 16:41:00 -05001933}
1934
1935static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1936{
1937 int rc;
1938
1939 if (pqi_ctrl_offline(ctrl_info))
1940 return -ENXIO;
1941
1942 mutex_lock(&ctrl_info->scan_mutex);
1943
1944 rc = pqi_update_scsi_devices(ctrl_info);
1945 if (rc)
1946 pqi_schedule_rescan_worker(ctrl_info);
1947
1948 mutex_unlock(&ctrl_info->scan_mutex);
1949
1950 return rc;
1951}
1952
1953static void pqi_scan_start(struct Scsi_Host *shost)
1954{
1955 pqi_scan_scsi_devices(shost_to_hba(shost));
1956}
1957
1958/* Returns TRUE if the scan is finished. */
1959
1960static int pqi_scan_finished(struct Scsi_Host *shost,
1961 unsigned long elapsed_time)
1962{
1963 struct pqi_ctrl_info *ctrl_info;
1964
1965 ctrl_info = shost_priv(shost);
1966
1967 return !mutex_is_locked(&ctrl_info->scan_mutex);
1968}
1969
1970static inline void pqi_set_encryption_info(
1971 struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
1972 u64 first_block)
1973{
1974 u32 volume_blk_size;
1975
1976 /*
1977 * Set the encryption tweak values based on logical block address.
1978 * If the block size is 512, the tweak value is equal to the LBA.
1979 * For other block sizes, tweak value is (LBA * block size) / 512.
1980 */
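	/*
	 * Illustrative example (numbers invented for this comment): with a
	 * 4096-byte volume block size and first_block = 10, the tweak becomes
	 * (10 * 4096) / 512 = 80; with a 512-byte block size it is simply the
	 * LBA itself.
	 */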
1981 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
1982 if (volume_blk_size != 512)
1983 first_block = (first_block * volume_blk_size) / 512;
1984
1985 encryption_info->data_encryption_key_index =
1986 get_unaligned_le16(&raid_map->data_encryption_key_index);
1987 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
1988 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
1989}
1990
1991/*
1992 * Attempt to perform offload RAID mapping for a logical volume I/O.
1993 */
1994
1995#define PQI_RAID_BYPASS_INELIGIBLE 1
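/*
 * Rough shape of the bypass path implemented below: decode the LBA and
 * block count from the CDB, bail out to the normal RAID path for anything
 * the firmware must handle (writes to non-RAID-0 volumes, requests that
 * span more than one row, column or RAID 5/6 group), translate the logical
 * LBA to a physical disk block via the cached RAID map, build a fresh
 * READ/WRITE CDB, and submit it on the AIO path.
 */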
1996
1997static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
1998 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
1999 struct pqi_queue_group *queue_group)
2000{
2001 struct raid_map *raid_map;
2002 bool is_write = false;
2003 u32 map_index;
2004 u64 first_block;
2005 u64 last_block;
2006 u32 block_cnt;
2007 u32 blocks_per_row;
2008 u64 first_row;
2009 u64 last_row;
2010 u32 first_row_offset;
2011 u32 last_row_offset;
2012 u32 first_column;
2013 u32 last_column;
2014 u64 r0_first_row;
2015 u64 r0_last_row;
2016 u32 r5or6_blocks_per_row;
2017 u64 r5or6_first_row;
2018 u64 r5or6_last_row;
2019 u32 r5or6_first_row_offset;
2020 u32 r5or6_last_row_offset;
2021 u32 r5or6_first_column;
2022 u32 r5or6_last_column;
2023 u16 data_disks_per_row;
2024 u32 total_disks_per_row;
2025 u16 layout_map_count;
2026 u32 stripesize;
2027 u16 strip_size;
2028 u32 first_group;
2029 u32 last_group;
2030 u32 current_group;
2031 u32 map_row;
2032 u32 aio_handle;
2033 u64 disk_block;
2034 u32 disk_block_cnt;
2035 u8 cdb[16];
2036 u8 cdb_length;
2037 int offload_to_mirror;
2038 struct pqi_encryption_info *encryption_info_ptr;
2039 struct pqi_encryption_info encryption_info;
2040#if BITS_PER_LONG == 32
2041 u64 tmpdiv;
2042#endif
2043
2044 /* Check for valid opcode, get LBA and block count. */
2045 switch (scmd->cmnd[0]) {
2046 case WRITE_6:
2047 is_write = true;
2048 /* fall through */
2049 case READ_6:
kevin Barnette018ef52016-09-16 15:01:51 -05002050 first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
2051 (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
Kevin Barnett6c223762016-06-27 16:41:00 -05002052 block_cnt = (u32)scmd->cmnd[4];
2053 if (block_cnt == 0)
2054 block_cnt = 256;
2055 break;
2056 case WRITE_10:
2057 is_write = true;
2058 /* fall through */
2059 case READ_10:
2060 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2061 block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
2062 break;
2063 case WRITE_12:
2064 is_write = true;
2065 /* fall through */
2066 case READ_12:
2067 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2068 block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
2069 break;
2070 case WRITE_16:
2071 is_write = true;
2072 /* fall through */
2073 case READ_16:
2074 first_block = get_unaligned_be64(&scmd->cmnd[2]);
2075 block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
2076 break;
2077 default:
2078 /* Process via normal I/O path. */
2079 return PQI_RAID_BYPASS_INELIGIBLE;
2080 }
2081
2082 /* Check for write to non-RAID-0. */
2083 if (is_write && device->raid_level != SA_RAID_0)
2084 return PQI_RAID_BYPASS_INELIGIBLE;
2085
2086 if (unlikely(block_cnt == 0))
2087 return PQI_RAID_BYPASS_INELIGIBLE;
2088
2089 last_block = first_block + block_cnt - 1;
2090 raid_map = device->raid_map;
2091
2092 /* Check for invalid block or wraparound. */
2093 if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2094 last_block < first_block)
2095 return PQI_RAID_BYPASS_INELIGIBLE;
2096
2097 data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
2098 strip_size = get_unaligned_le16(&raid_map->strip_size);
2099 layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2100
2101 /* Calculate stripe information for the request. */
2102 blocks_per_row = data_disks_per_row * strip_size;
2103#if BITS_PER_LONG == 32
2104 tmpdiv = first_block;
2105 do_div(tmpdiv, blocks_per_row);
2106 first_row = tmpdiv;
2107 tmpdiv = last_block;
2108 do_div(tmpdiv, blocks_per_row);
2109 last_row = tmpdiv;
2110 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2111 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2112 tmpdiv = first_row_offset;
2113 do_div(tmpdiv, strip_size);
2114 first_column = tmpdiv;
2115 tmpdiv = last_row_offset;
2116 do_div(tmpdiv, strip_size);
2117 last_column = tmpdiv;
2118#else
2119 first_row = first_block / blocks_per_row;
2120 last_row = last_block / blocks_per_row;
2121 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2122 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2123 first_column = first_row_offset / strip_size;
2124 last_column = last_row_offset / strip_size;
2125#endif
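	/*
	 * Worked example (illustrative values only): with strip_size = 128 and
	 * data_disks_per_row = 4, blocks_per_row = 512. A request starting at
	 * block 1000 falls in row 1 (1000 / 512), at row offset 488
	 * (1000 - 512), in column 3 (488 / 128).
	 */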
2126
2127	/* If this request isn't confined to a single row and column, hand it to the controller. */
2128 if (first_row != last_row || first_column != last_column)
2129 return PQI_RAID_BYPASS_INELIGIBLE;
2130
2131 /* Proceeding with driver mapping. */
2132 total_disks_per_row = data_disks_per_row +
2133 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2134 map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2135 get_unaligned_le16(&raid_map->row_cnt);
2136 map_index = (map_row * total_disks_per_row) + first_column;
2137
2138 /* RAID 1 */
2139 if (device->raid_level == SA_RAID_1) {
2140 if (device->offload_to_mirror)
2141 map_index += data_disks_per_row;
2142 device->offload_to_mirror = !device->offload_to_mirror;
2143 } else if (device->raid_level == SA_RAID_ADM) {
2144 /* RAID ADM */
2145 /*
2146 * Handles N-way mirrors (R1-ADM) and R10 with # of drives
2147 * divisible by 3.
2148 */
2149 offload_to_mirror = device->offload_to_mirror;
2150 if (offload_to_mirror == 0) {
2151			/* Use the physical disk in the first mirrored group. */
2152 map_index %= data_disks_per_row;
2153 } else {
2154 do {
2155 /*
2156 * Determine mirror group that map_index
2157 * indicates.
2158 */
2159 current_group = map_index / data_disks_per_row;
2160
2161 if (offload_to_mirror != current_group) {
2162 if (current_group <
2163 layout_map_count - 1) {
2164 /*
2165 * Select raid index from
2166 * next group.
2167 */
2168 map_index += data_disks_per_row;
2169 current_group++;
2170 } else {
2171 /*
2172 * Select raid index from first
2173 * group.
2174 */
2175 map_index %= data_disks_per_row;
2176 current_group = 0;
2177 }
2178 }
2179 } while (offload_to_mirror != current_group);
2180 }
2181
2182 /* Set mirror group to use next time. */
2183 offload_to_mirror =
2184 (offload_to_mirror >= layout_map_count - 1) ?
2185 0 : offload_to_mirror + 1;
2186 WARN_ON(offload_to_mirror >= layout_map_count);
2187 device->offload_to_mirror = offload_to_mirror;
2188		/*
2189		 * Avoid direct use of device->offload_to_mirror within this
2190		 * function since multiple threads might simultaneously
2191		 * increment it beyond the range 0 to layout_map_count - 1.
2192		 */
2193 } else if ((device->raid_level == SA_RAID_5 ||
2194 device->raid_level == SA_RAID_6) && layout_map_count > 1) {
2195 /* RAID 50/60 */
2196 /* Verify first and last block are in same RAID group */
2197 r5or6_blocks_per_row = strip_size * data_disks_per_row;
2198 stripesize = r5or6_blocks_per_row * layout_map_count;
2199#if BITS_PER_LONG == 32
2200 tmpdiv = first_block;
2201 first_group = do_div(tmpdiv, stripesize);
2202 tmpdiv = first_group;
2203 do_div(tmpdiv, r5or6_blocks_per_row);
2204 first_group = tmpdiv;
2205 tmpdiv = last_block;
2206 last_group = do_div(tmpdiv, stripesize);
2207 tmpdiv = last_group;
2208 do_div(tmpdiv, r5or6_blocks_per_row);
2209 last_group = tmpdiv;
2210#else
2211 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
2212 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
2213#endif
2214 if (first_group != last_group)
2215 return PQI_RAID_BYPASS_INELIGIBLE;
2216
2217 /* Verify request is in a single row of RAID 5/6 */
2218#if BITS_PER_LONG == 32
2219 tmpdiv = first_block;
2220 do_div(tmpdiv, stripesize);
2221 first_row = r5or6_first_row = r0_first_row = tmpdiv;
2222 tmpdiv = last_block;
2223 do_div(tmpdiv, stripesize);
2224 r5or6_last_row = r0_last_row = tmpdiv;
2225#else
2226 first_row = r5or6_first_row = r0_first_row =
2227 first_block / stripesize;
2228 r5or6_last_row = r0_last_row = last_block / stripesize;
2229#endif
2230 if (r5or6_first_row != r5or6_last_row)
2231 return PQI_RAID_BYPASS_INELIGIBLE;
2232
2233 /* Verify request is in a single column */
2234#if BITS_PER_LONG == 32
2235 tmpdiv = first_block;
2236 first_row_offset = do_div(tmpdiv, stripesize);
2237 tmpdiv = first_row_offset;
2238 first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
2239 r5or6_first_row_offset = first_row_offset;
2240 tmpdiv = last_block;
2241 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
2242 tmpdiv = r5or6_last_row_offset;
2243 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
2244 tmpdiv = r5or6_first_row_offset;
2245 do_div(tmpdiv, strip_size);
2246 first_column = r5or6_first_column = tmpdiv;
2247 tmpdiv = r5or6_last_row_offset;
2248 do_div(tmpdiv, strip_size);
2249 r5or6_last_column = tmpdiv;
2250#else
2251 first_row_offset = r5or6_first_row_offset =
2252 (u32)((first_block % stripesize) %
2253 r5or6_blocks_per_row);
2254
2255 r5or6_last_row_offset =
2256 (u32)((last_block % stripesize) %
2257 r5or6_blocks_per_row);
2258
2259 first_column = r5or6_first_row_offset / strip_size;
2260 r5or6_first_column = first_column;
2261 r5or6_last_column = r5or6_last_row_offset / strip_size;
2262#endif
2263 if (r5or6_first_column != r5or6_last_column)
2264 return PQI_RAID_BYPASS_INELIGIBLE;
2265
2266 /* Request is eligible */
2267 map_row =
2268 ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2269 get_unaligned_le16(&raid_map->row_cnt);
2270
2271 map_index = (first_group *
2272 (get_unaligned_le16(&raid_map->row_cnt) *
2273 total_disks_per_row)) +
2274 (map_row * total_disks_per_row) + first_column;
2275 }
2276
2277 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
2278 return PQI_RAID_BYPASS_INELIGIBLE;
2279
2280 aio_handle = raid_map->disk_data[map_index].aio_handle;
2281 disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2282 first_row * strip_size +
2283 (first_row_offset - first_column * strip_size);
2284 disk_block_cnt = block_cnt;
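	/*
	 * Continuing the illustrative values above with disk_starting_blk = 0:
	 * disk_block = 0 + (1 * 128) + (488 - 3 * 128) = 232, i.e. the block
	 * offset of the request on the selected physical drive.
	 */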
2285
2286 /* Handle differing logical/physical block sizes. */
2287 if (raid_map->phys_blk_shift) {
2288 disk_block <<= raid_map->phys_blk_shift;
2289 disk_block_cnt <<= raid_map->phys_blk_shift;
2290 }
2291
2292 if (unlikely(disk_block_cnt > 0xffff))
2293 return PQI_RAID_BYPASS_INELIGIBLE;
2294
2295 /* Build the new CDB for the physical disk I/O. */
2296 if (disk_block > 0xffffffff) {
2297 cdb[0] = is_write ? WRITE_16 : READ_16;
2298 cdb[1] = 0;
2299 put_unaligned_be64(disk_block, &cdb[2]);
2300 put_unaligned_be32(disk_block_cnt, &cdb[10]);
2301 cdb[14] = 0;
2302 cdb[15] = 0;
2303 cdb_length = 16;
2304 } else {
2305 cdb[0] = is_write ? WRITE_10 : READ_10;
2306 cdb[1] = 0;
2307 put_unaligned_be32((u32)disk_block, &cdb[2]);
2308 cdb[6] = 0;
2309 put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
2310 cdb[9] = 0;
2311 cdb_length = 10;
2312 }
2313
2314 if (get_unaligned_le16(&raid_map->flags) &
2315 RAID_MAP_ENCRYPTION_ENABLED) {
2316 pqi_set_encryption_info(&encryption_info, raid_map,
2317 first_block);
2318 encryption_info_ptr = &encryption_info;
2319 } else {
2320 encryption_info_ptr = NULL;
2321 }
2322
2323 return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
2324 cdb, cdb_length, queue_group, encryption_info_ptr);
2325}
2326
2327#define PQI_STATUS_IDLE 0x0
2328
2329#define PQI_CREATE_ADMIN_QUEUE_PAIR 1
2330#define PQI_DELETE_ADMIN_QUEUE_PAIR 2
2331
2332#define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
2333#define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
2334#define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
2335#define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
2336#define PQI_DEVICE_STATE_ERROR 0x4
2337
2338#define PQI_MODE_READY_TIMEOUT_SECS 30
2339#define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
2340
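/*
 * The wait below proceeds in three stages, all sharing one
 * PQI_MODE_READY_TIMEOUT_SECS deadline: wait for the PQI signature to
 * appear, then for the function/status code to report IDLE, then for the
 * device status register to report that all registers are ready.
 */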
2341static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
2342{
2343 struct pqi_device_registers __iomem *pqi_registers;
2344 unsigned long timeout;
2345 u64 signature;
2346 u8 status;
2347
2348 pqi_registers = ctrl_info->pqi_registers;
2349 timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
2350
2351 while (1) {
2352 signature = readq(&pqi_registers->signature);
2353 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
2354 sizeof(signature)) == 0)
2355 break;
2356 if (time_after(jiffies, timeout)) {
2357 dev_err(&ctrl_info->pci_dev->dev,
2358 "timed out waiting for PQI signature\n");
2359 return -ETIMEDOUT;
2360 }
2361 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2362 }
2363
2364 while (1) {
2365 status = readb(&pqi_registers->function_and_status_code);
2366 if (status == PQI_STATUS_IDLE)
2367 break;
2368 if (time_after(jiffies, timeout)) {
2369 dev_err(&ctrl_info->pci_dev->dev,
2370 "timed out waiting for PQI IDLE\n");
2371 return -ETIMEDOUT;
2372 }
2373 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2374 }
2375
2376 while (1) {
2377 if (readl(&pqi_registers->device_status) ==
2378 PQI_DEVICE_STATE_ALL_REGISTERS_READY)
2379 break;
2380 if (time_after(jiffies, timeout)) {
2381 dev_err(&ctrl_info->pci_dev->dev,
2382 "timed out waiting for PQI all registers ready\n");
2383 return -ETIMEDOUT;
2384 }
2385 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2386 }
2387
2388 return 0;
2389}
2390
2391static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
2392{
2393 struct pqi_scsi_dev *device;
2394
2395 device = io_request->scmd->device->hostdata;
2396 device->offload_enabled = false;
2397}
2398
2399static inline void pqi_take_device_offline(struct scsi_device *sdev)
2400{
2401 struct pqi_ctrl_info *ctrl_info;
Kevin Barnette58081a2016-08-31 14:54:29 -05002402 struct pqi_scsi_dev *device;
Kevin Barnett6c223762016-06-27 16:41:00 -05002403
2404 if (scsi_device_online(sdev)) {
2405 scsi_device_set_state(sdev, SDEV_OFFLINE);
2406 ctrl_info = shost_to_hba(sdev->host);
2407 schedule_delayed_work(&ctrl_info->rescan_work, 0);
Kevin Barnette58081a2016-08-31 14:54:29 -05002408 device = sdev->hostdata;
2409 dev_err(&ctrl_info->pci_dev->dev, "offlined scsi %d:%d:%d:%d\n",
2410 ctrl_info->scsi_host->host_no, device->bus,
2411 device->target, device->lun);
Kevin Barnett6c223762016-06-27 16:41:00 -05002412 }
2413}
2414
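/*
 * RAID-path error handling below: an underflow adjusts the residual count
 * and may surface as DID_SOFT_ERROR; sense data carrying HARDWARE ERROR
 * with ASC/ASCQ 0x3e/0x01 (logical unit failure) takes the device offline
 * and completes the command with DID_NO_CONNECT.
 */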
2415static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
2416{
2417 u8 scsi_status;
2418 u8 host_byte;
2419 struct scsi_cmnd *scmd;
2420 struct pqi_raid_error_info *error_info;
2421 size_t sense_data_length;
2422 int residual_count;
2423 int xfer_count;
2424 struct scsi_sense_hdr sshdr;
2425
2426 scmd = io_request->scmd;
2427 if (!scmd)
2428 return;
2429
2430 error_info = io_request->error_info;
2431 scsi_status = error_info->status;
2432 host_byte = DID_OK;
2433
2434 if (error_info->data_out_result == PQI_DATA_IN_OUT_UNDERFLOW) {
2435 xfer_count =
2436 get_unaligned_le32(&error_info->data_out_transferred);
2437 residual_count = scsi_bufflen(scmd) - xfer_count;
2438 scsi_set_resid(scmd, residual_count);
2439 if (xfer_count < scmd->underflow)
2440 host_byte = DID_SOFT_ERROR;
2441 }
2442
2443 sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
2444 if (sense_data_length == 0)
2445 sense_data_length =
2446 get_unaligned_le16(&error_info->response_data_length);
2447 if (sense_data_length) {
2448 if (sense_data_length > sizeof(error_info->data))
2449 sense_data_length = sizeof(error_info->data);
2450
2451 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
2452 scsi_normalize_sense(error_info->data,
2453 sense_data_length, &sshdr) &&
2454 sshdr.sense_key == HARDWARE_ERROR &&
2455 sshdr.asc == 0x3e &&
2456 sshdr.ascq == 0x1) {
2457 pqi_take_device_offline(scmd->device);
2458 host_byte = DID_NO_CONNECT;
2459 }
2460
2461 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2462 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2463 memcpy(scmd->sense_buffer, error_info->data,
2464 sense_data_length);
2465 }
2466
2467 scmd->result = scsi_status;
2468 set_host_byte(scmd, host_byte);
2469}
2470
2471static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
2472{
2473 u8 scsi_status;
2474 u8 host_byte;
2475 struct scsi_cmnd *scmd;
2476 struct pqi_aio_error_info *error_info;
2477 size_t sense_data_length;
2478 int residual_count;
2479 int xfer_count;
2480 bool device_offline;
2481
2482 scmd = io_request->scmd;
2483 error_info = io_request->error_info;
2484 host_byte = DID_OK;
2485 sense_data_length = 0;
2486 device_offline = false;
2487
2488 switch (error_info->service_response) {
2489 case PQI_AIO_SERV_RESPONSE_COMPLETE:
2490 scsi_status = error_info->status;
2491 break;
2492 case PQI_AIO_SERV_RESPONSE_FAILURE:
2493 switch (error_info->status) {
2494 case PQI_AIO_STATUS_IO_ABORTED:
2495 scsi_status = SAM_STAT_TASK_ABORTED;
2496 break;
2497 case PQI_AIO_STATUS_UNDERRUN:
2498 scsi_status = SAM_STAT_GOOD;
2499 residual_count = get_unaligned_le32(
2500 &error_info->residual_count);
2501 scsi_set_resid(scmd, residual_count);
2502 xfer_count = scsi_bufflen(scmd) - residual_count;
2503 if (xfer_count < scmd->underflow)
2504 host_byte = DID_SOFT_ERROR;
2505 break;
2506 case PQI_AIO_STATUS_OVERRUN:
2507 scsi_status = SAM_STAT_GOOD;
2508 break;
2509 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
2510 pqi_aio_path_disabled(io_request);
2511 scsi_status = SAM_STAT_GOOD;
2512 io_request->status = -EAGAIN;
2513 break;
2514 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
2515 case PQI_AIO_STATUS_INVALID_DEVICE:
2516 device_offline = true;
2517 pqi_take_device_offline(scmd->device);
2518 host_byte = DID_NO_CONNECT;
2519 scsi_status = SAM_STAT_CHECK_CONDITION;
2520 break;
2521 case PQI_AIO_STATUS_IO_ERROR:
2522 default:
2523 scsi_status = SAM_STAT_CHECK_CONDITION;
2524 break;
2525 }
2526 break;
2527 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
2528 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
2529 scsi_status = SAM_STAT_GOOD;
2530 break;
2531 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
2532 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
2533 default:
2534 scsi_status = SAM_STAT_CHECK_CONDITION;
2535 break;
2536 }
2537
2538 if (error_info->data_present) {
2539 sense_data_length =
2540 get_unaligned_le16(&error_info->data_length);
2541 if (sense_data_length) {
2542 if (sense_data_length > sizeof(error_info->data))
2543 sense_data_length = sizeof(error_info->data);
2544 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2545 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2546 memcpy(scmd->sense_buffer, error_info->data,
2547 sense_data_length);
2548 }
2549 }
2550
2551 if (device_offline && sense_data_length == 0)
2552 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
2553 0x3e, 0x1);
2554
2555 scmd->result = scsi_status;
2556 set_host_byte(scmd, host_byte);
2557}
2558
2559static void pqi_process_io_error(unsigned int iu_type,
2560 struct pqi_io_request *io_request)
2561{
2562 switch (iu_type) {
2563 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2564 pqi_process_raid_io_error(io_request);
2565 break;
2566 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2567 pqi_process_aio_io_error(io_request);
2568 break;
2569 }
2570}
2571
2572static int pqi_interpret_task_management_response(
2573 struct pqi_task_management_response *response)
2574{
2575 int rc;
2576
2577 switch (response->response_code) {
Kevin Barnettb17f0482016-08-31 14:54:17 -05002578 case SOP_TMF_COMPLETE:
2579 case SOP_TMF_FUNCTION_SUCCEEDED:
Kevin Barnett6c223762016-06-27 16:41:00 -05002580 rc = 0;
2581 break;
2582 default:
2583 rc = -EIO;
2584 break;
2585 }
2586
2587 return rc;
2588}
2589
2590static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
2591 struct pqi_queue_group *queue_group)
2592{
2593 unsigned int num_responses;
2594 pqi_index_t oq_pi;
2595 pqi_index_t oq_ci;
2596 struct pqi_io_request *io_request;
2597 struct pqi_io_response *response;
2598 u16 request_id;
2599
2600 num_responses = 0;
2601 oq_ci = queue_group->oq_ci_copy;
2602
2603 while (1) {
2604 oq_pi = *queue_group->oq_pi;
2605 if (oq_pi == oq_ci)
2606 break;
2607
2608 num_responses++;
2609 response = queue_group->oq_element_array +
2610 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
2611
2612 request_id = get_unaligned_le16(&response->request_id);
2613 WARN_ON(request_id >= ctrl_info->max_io_slots);
2614
2615 io_request = &ctrl_info->io_request_pool[request_id];
2616 WARN_ON(atomic_read(&io_request->refcount) == 0);
2617
2618 switch (response->header.iu_type) {
2619 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
2620 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
2621 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
2622 break;
2623 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
2624 io_request->status =
2625 pqi_interpret_task_management_response(
2626 (void *)response);
2627 break;
2628 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
2629 pqi_aio_path_disabled(io_request);
2630 io_request->status = -EAGAIN;
2631 break;
2632 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2633 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2634 io_request->error_info = ctrl_info->error_buffer +
2635 (get_unaligned_le16(&response->error_index) *
2636 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
2637 pqi_process_io_error(response->header.iu_type,
2638 io_request);
2639 break;
2640 default:
2641 dev_err(&ctrl_info->pci_dev->dev,
2642 "unexpected IU type: 0x%x\n",
2643 response->header.iu_type);
2644 WARN_ON(response->header.iu_type);
2645 break;
2646 }
2647
2648 io_request->io_complete_callback(io_request,
2649 io_request->context);
2650
2651 /*
2652 * Note that the I/O request structure CANNOT BE TOUCHED after
2653 * returning from the I/O completion callback!
2654 */
2655
2656 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
2657 }
2658
2659 if (num_responses) {
2660 queue_group->oq_ci_copy = oq_ci;
2661 writel(oq_ci, queue_group->oq_ci);
2662 }
2663
2664 return num_responses;
2665}
2666
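/*
 * Circular-queue bookkeeping: one element is always left unused so that
 * pi == ci unambiguously means "empty". For illustration, with
 * elements_in_queue = 8, pi = 2 and ci = 6, the queue currently holds
 * 8 - 6 + 2 = 4 elements and 8 - 4 - 1 = 3 more may be written.
 */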
2667static inline unsigned int pqi_num_elements_free(unsigned int pi,
Kevin Barnettdf7a1fc2016-08-31 14:54:59 -05002668 unsigned int ci, unsigned int elements_in_queue)
Kevin Barnett6c223762016-06-27 16:41:00 -05002669{
2670 unsigned int num_elements_used;
2671
2672 if (pi >= ci)
2673 num_elements_used = pi - ci;
2674 else
2675 num_elements_used = elements_in_queue - ci + pi;
2676
2677 return elements_in_queue - num_elements_used - 1;
2678}
2679
2680#define PQI_EVENT_ACK_TIMEOUT 30
2681
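/*
 * Event acknowledgements are posted on the RAID path of the default queue
 * group. PQI_EVENT_ACK_TIMEOUT is in seconds; the loop below re-takes the
 * submit lock until a free inbound queue element is available (or the
 * deadline passes), then copies the IU in and rings the producer index.
 */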
2682static void pqi_start_event_ack(struct pqi_ctrl_info *ctrl_info,
2683 struct pqi_event_acknowledge_request *iu, size_t iu_length)
2684{
2685 pqi_index_t iq_pi;
2686 pqi_index_t iq_ci;
2687 unsigned long flags;
2688 void *next_element;
2689 unsigned long timeout;
2690 struct pqi_queue_group *queue_group;
2691
2692 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
2693 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
2694
2695 timeout = (PQI_EVENT_ACK_TIMEOUT * HZ) + jiffies;
2696
2697 while (1) {
2698 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
2699
2700 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
2701 iq_ci = *queue_group->iq_ci[RAID_PATH];
2702
2703 if (pqi_num_elements_free(iq_pi, iq_ci,
2704 ctrl_info->num_elements_per_iq))
2705 break;
2706
2707 spin_unlock_irqrestore(
2708 &queue_group->submit_lock[RAID_PATH], flags);
2709
2710 if (time_after(jiffies, timeout)) {
2711 dev_err(&ctrl_info->pci_dev->dev,
2712 "sending event acknowledge timed out\n");
2713 return;
2714 }
2715 }
2716
2717 next_element = queue_group->iq_element_array[RAID_PATH] +
2718 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
2719
2720 memcpy(next_element, iu, iu_length);
2721
2722 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
2723
2724 queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
2725
2726 /*
2727 * This write notifies the controller that an IU is available to be
2728 * processed.
2729 */
2730 writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
2731
2732 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
Kevin Barnett6c223762016-06-27 16:41:00 -05002733}
2734
2735static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
2736 struct pqi_event *event)
2737{
2738 struct pqi_event_acknowledge_request request;
2739
2740 memset(&request, 0, sizeof(request));
2741
2742 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
2743 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
2744 &request.header.iu_length);
2745 request.event_type = event->event_type;
2746 request.event_id = event->event_id;
2747 request.additional_event_id = event->additional_event_id;
2748
2749 pqi_start_event_ack(ctrl_info, &request, sizeof(request));
2750}
2751
2752static void pqi_event_worker(struct work_struct *work)
2753{
2754 unsigned int i;
2755 struct pqi_ctrl_info *ctrl_info;
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002756 struct pqi_event *event;
Kevin Barnett6c223762016-06-27 16:41:00 -05002757 bool got_non_heartbeat_event = false;
2758
2759 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
2760
Kevin Barnett7561a7e2017-05-03 18:52:58 -05002761 pqi_ctrl_busy(ctrl_info);
2762 pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT);
2763
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002764 event = ctrl_info->events;
Kevin Barnett6c223762016-06-27 16:41:00 -05002765 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002766 if (event->pending) {
2767 event->pending = false;
2768 pqi_acknowledge_event(ctrl_info, event);
2769 if (i != PQI_EVENT_TYPE_HEARTBEAT)
Kevin Barnett6c223762016-06-27 16:41:00 -05002770 got_non_heartbeat_event = true;
2771 }
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002772 event++;
Kevin Barnett6c223762016-06-27 16:41:00 -05002773 }
2774
Kevin Barnett7561a7e2017-05-03 18:52:58 -05002775 pqi_ctrl_unbusy(ctrl_info);
2776
2777 pqi_schedule_rescan_worker(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05002778}
2779
2780static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
2781{
2782 unsigned int i;
2783 unsigned int path;
2784 struct pqi_queue_group *queue_group;
2785 unsigned long flags;
2786 struct pqi_io_request *io_request;
2787 struct pqi_io_request *next;
2788 struct scsi_cmnd *scmd;
2789
2790 ctrl_info->controller_online = false;
2791 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
Kevin Barnett5b0fba02017-05-03 18:52:40 -05002792 sis_shutdown_ctrl(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05002793
2794 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
2795 queue_group = &ctrl_info->queue_groups[i];
2796
2797 for (path = 0; path < 2; path++) {
2798 spin_lock_irqsave(
2799 &queue_group->submit_lock[path], flags);
2800
2801 list_for_each_entry_safe(io_request, next,
2802 &queue_group->request_list[path],
2803 request_list_entry) {
2804
2805 scmd = io_request->scmd;
2806 if (scmd) {
2807 set_host_byte(scmd, DID_NO_CONNECT);
2808 pqi_scsi_done(scmd);
2809 }
2810
2811 list_del(&io_request->request_list_entry);
2812 }
2813
2814 spin_unlock_irqrestore(
2815 &queue_group->submit_lock[path], flags);
2816 }
2817 }
2818}
2819
2820#define PQI_HEARTBEAT_TIMER_INTERVAL (5 * HZ)
2821#define PQI_MAX_HEARTBEAT_REQUESTS 5
2822
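/*
 * Heartbeat monitoring: the timer fires every 5 seconds. If no interrupts
 * have arrived since the previous tick, a heartbeat event is flagged and
 * the event worker is scheduled; after more than PQI_MAX_HEARTBEAT_REQUESTS
 * consecutive silent intervals the controller is taken offline.
 */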
2823static void pqi_heartbeat_timer_handler(unsigned long data)
2824{
2825 int num_interrupts;
2826 struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data;
2827
2828 num_interrupts = atomic_read(&ctrl_info->num_interrupts);
2829
2830 if (num_interrupts == ctrl_info->previous_num_interrupts) {
2831 ctrl_info->num_heartbeats_requested++;
2832 if (ctrl_info->num_heartbeats_requested >
2833 PQI_MAX_HEARTBEAT_REQUESTS) {
2834 pqi_take_ctrl_offline(ctrl_info);
2835 return;
2836 }
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002837 ctrl_info->events[PQI_EVENT_HEARTBEAT].pending = true;
Kevin Barnett6c223762016-06-27 16:41:00 -05002838 schedule_work(&ctrl_info->event_work);
2839 } else {
2840 ctrl_info->num_heartbeats_requested = 0;
2841 }
2842
2843 ctrl_info->previous_num_interrupts = num_interrupts;
2844 mod_timer(&ctrl_info->heartbeat_timer,
2845 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
2846}
2847
2848static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2849{
2850 ctrl_info->previous_num_interrupts =
2851 atomic_read(&ctrl_info->num_interrupts);
2852
2853 init_timer(&ctrl_info->heartbeat_timer);
2854 ctrl_info->heartbeat_timer.expires =
2855 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
2856 ctrl_info->heartbeat_timer.data = (unsigned long)ctrl_info;
2857 ctrl_info->heartbeat_timer.function = pqi_heartbeat_timer_handler;
2858 add_timer(&ctrl_info->heartbeat_timer);
2859 ctrl_info->heartbeat_timer_started = true;
2860}
2861
2862static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2863{
2864 if (ctrl_info->heartbeat_timer_started)
2865 del_timer_sync(&ctrl_info->heartbeat_timer);
2866}
2867
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002868static inline int pqi_event_type_to_event_index(unsigned int event_type)
Kevin Barnett6c223762016-06-27 16:41:00 -05002869{
2870 int index;
2871
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002872 for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
2873 if (event_type == pqi_supported_event_types[index])
2874 return index;
Kevin Barnett6c223762016-06-27 16:41:00 -05002875
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002876 return -1;
2877}
2878
2879static inline bool pqi_is_supported_event(unsigned int event_type)
2880{
2881 return pqi_event_type_to_event_index(event_type) != -1;
Kevin Barnett6c223762016-06-27 16:41:00 -05002882}
2883
2884static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
2885{
2886 unsigned int num_events;
2887 pqi_index_t oq_pi;
2888 pqi_index_t oq_ci;
2889 struct pqi_event_queue *event_queue;
2890 struct pqi_event_response *response;
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002891 struct pqi_event *event;
Kevin Barnett6c223762016-06-27 16:41:00 -05002892 bool need_delayed_work;
2893 int event_index;
2894
2895 event_queue = &ctrl_info->event_queue;
2896 num_events = 0;
2897 need_delayed_work = false;
2898 oq_ci = event_queue->oq_ci_copy;
2899
2900 while (1) {
2901 oq_pi = *event_queue->oq_pi;
2902 if (oq_pi == oq_ci)
2903 break;
2904
2905 num_events++;
2906 response = event_queue->oq_element_array +
2907 (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
2908
2909 event_index =
2910 pqi_event_type_to_event_index(response->event_type);
2911
2912 if (event_index >= 0) {
2913 if (response->request_acknowlege) {
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002914 event = &ctrl_info->events[event_index];
2915 event->pending = true;
2916 event->event_type = response->event_type;
2917 event->event_id = response->event_id;
2918 event->additional_event_id =
Kevin Barnett6c223762016-06-27 16:41:00 -05002919 response->additional_event_id;
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002920 if (event_index != PQI_EVENT_TYPE_HEARTBEAT) {
2921 event->pending = true;
Kevin Barnett6c223762016-06-27 16:41:00 -05002922 need_delayed_work = true;
2923 }
2924 }
2925 }
2926
2927 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
2928 }
2929
2930 if (num_events) {
2931 event_queue->oq_ci_copy = oq_ci;
2932 writel(oq_ci, event_queue->oq_ci);
2933
2934 if (need_delayed_work)
2935 schedule_work(&ctrl_info->event_work);
2936 }
2937
2938 return num_events;
2939}
2940
2941static irqreturn_t pqi_irq_handler(int irq, void *data)
2942{
2943 struct pqi_ctrl_info *ctrl_info;
2944 struct pqi_queue_group *queue_group;
2945 unsigned int num_responses_handled;
2946
2947 queue_group = data;
2948 ctrl_info = queue_group->ctrl_info;
2949
2950 if (!ctrl_info || !queue_group->oq_ci)
2951 return IRQ_NONE;
2952
2953 num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
2954
2955 if (irq == ctrl_info->event_irq)
2956 num_responses_handled += pqi_process_event_intr(ctrl_info);
2957
2958 if (num_responses_handled)
2959 atomic_inc(&ctrl_info->num_interrupts);
2960
2961 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
2962 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
2963
2964 return IRQ_HANDLED;
2965}
2966
2967static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
2968{
Christoph Hellwig52198222016-11-01 08:12:49 -06002969 struct pci_dev *pdev = ctrl_info->pci_dev;
Kevin Barnett6c223762016-06-27 16:41:00 -05002970 int i;
2971 int rc;
2972
Christoph Hellwig52198222016-11-01 08:12:49 -06002973 ctrl_info->event_irq = pci_irq_vector(pdev, 0);
Kevin Barnett6c223762016-06-27 16:41:00 -05002974
2975 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
Christoph Hellwig52198222016-11-01 08:12:49 -06002976 rc = request_irq(pci_irq_vector(pdev, i), pqi_irq_handler, 0,
2977 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
Kevin Barnett6c223762016-06-27 16:41:00 -05002978 if (rc) {
Christoph Hellwig52198222016-11-01 08:12:49 -06002979 dev_err(&pdev->dev,
Kevin Barnett6c223762016-06-27 16:41:00 -05002980 "irq %u init failed with error %d\n",
Christoph Hellwig52198222016-11-01 08:12:49 -06002981 pci_irq_vector(pdev, i), rc);
Kevin Barnett6c223762016-06-27 16:41:00 -05002982 return rc;
2983 }
2984 ctrl_info->num_msix_vectors_initialized++;
2985 }
2986
2987 return 0;
2988}
2989
Kevin Barnett98bf0612017-05-03 18:52:28 -05002990static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
2991{
2992 int i;
2993
2994 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
2995 free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
2996 &ctrl_info->queue_groups[i]);
2997
2998 ctrl_info->num_msix_vectors_initialized = 0;
2999}
3000
Kevin Barnett6c223762016-06-27 16:41:00 -05003001static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3002{
Kevin Barnett98bf0612017-05-03 18:52:28 -05003003 int num_vectors_enabled;
Kevin Barnett6c223762016-06-27 16:41:00 -05003004
Kevin Barnett98bf0612017-05-03 18:52:28 -05003005 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
Christoph Hellwig52198222016-11-01 08:12:49 -06003006 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
3007 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
Kevin Barnett98bf0612017-05-03 18:52:28 -05003008 if (num_vectors_enabled < 0) {
Kevin Barnett6c223762016-06-27 16:41:00 -05003009 dev_err(&ctrl_info->pci_dev->dev,
Kevin Barnett98bf0612017-05-03 18:52:28 -05003010 "MSI-X init failed with error %d\n",
3011 num_vectors_enabled);
3012 return num_vectors_enabled;
Kevin Barnett6c223762016-06-27 16:41:00 -05003013 }
3014
Kevin Barnett98bf0612017-05-03 18:52:28 -05003015 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
3016
Kevin Barnett6c223762016-06-27 16:41:00 -05003017 return 0;
3018}
3019
Kevin Barnett98bf0612017-05-03 18:52:28 -05003020static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3021{
3022 if (ctrl_info->num_msix_vectors_enabled) {
3023 pci_free_irq_vectors(ctrl_info->pci_dev);
3024 ctrl_info->num_msix_vectors_enabled = 0;
3025 }
3026}
3027
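/*
 * All operational queue memory comes from one coherent DMA allocation. The
 * function below makes two passes: the first walks a NULL-based pointer
 * through every element array and queue index, applying the required
 * alignments, purely to compute the total length; the second carves up the
 * real allocation along the same layout and records the bus addresses.
 */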
Kevin Barnett6c223762016-06-27 16:41:00 -05003028static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
3029{
3030 unsigned int i;
3031 size_t alloc_length;
3032 size_t element_array_length_per_iq;
3033 size_t element_array_length_per_oq;
3034 void *element_array;
3035 void *next_queue_index;
3036 void *aligned_pointer;
3037 unsigned int num_inbound_queues;
3038 unsigned int num_outbound_queues;
3039 unsigned int num_queue_indexes;
3040 struct pqi_queue_group *queue_group;
3041
3042 element_array_length_per_iq =
3043 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
3044 ctrl_info->num_elements_per_iq;
3045 element_array_length_per_oq =
3046 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
3047 ctrl_info->num_elements_per_oq;
3048 num_inbound_queues = ctrl_info->num_queue_groups * 2;
3049 num_outbound_queues = ctrl_info->num_queue_groups;
3050 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
3051
3052 aligned_pointer = NULL;
3053
3054 for (i = 0; i < num_inbound_queues; i++) {
3055 aligned_pointer = PTR_ALIGN(aligned_pointer,
3056 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3057 aligned_pointer += element_array_length_per_iq;
3058 }
3059
3060 for (i = 0; i < num_outbound_queues; i++) {
3061 aligned_pointer = PTR_ALIGN(aligned_pointer,
3062 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3063 aligned_pointer += element_array_length_per_oq;
3064 }
3065
3066 aligned_pointer = PTR_ALIGN(aligned_pointer,
3067 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3068 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3069 PQI_EVENT_OQ_ELEMENT_LENGTH;
3070
3071 for (i = 0; i < num_queue_indexes; i++) {
3072 aligned_pointer = PTR_ALIGN(aligned_pointer,
3073 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3074 aligned_pointer += sizeof(pqi_index_t);
3075 }
3076
3077 alloc_length = (size_t)aligned_pointer +
3078 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3079
3080 ctrl_info->queue_memory_base =
3081 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3082 alloc_length,
3083 &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);
3084
3085 if (!ctrl_info->queue_memory_base) {
3086 dev_err(&ctrl_info->pci_dev->dev,
3087			"failed to allocate memory for PQI operational queues\n");
3088 return -ENOMEM;
3089 }
3090
3091 ctrl_info->queue_memory_length = alloc_length;
3092
3093 element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
3094 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3095
3096 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3097 queue_group = &ctrl_info->queue_groups[i];
3098 queue_group->iq_element_array[RAID_PATH] = element_array;
3099 queue_group->iq_element_array_bus_addr[RAID_PATH] =
3100 ctrl_info->queue_memory_base_dma_handle +
3101 (element_array - ctrl_info->queue_memory_base);
3102 element_array += element_array_length_per_iq;
3103 element_array = PTR_ALIGN(element_array,
3104 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3105 queue_group->iq_element_array[AIO_PATH] = element_array;
3106 queue_group->iq_element_array_bus_addr[AIO_PATH] =
3107 ctrl_info->queue_memory_base_dma_handle +
3108 (element_array - ctrl_info->queue_memory_base);
3109 element_array += element_array_length_per_iq;
3110 element_array = PTR_ALIGN(element_array,
3111 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3112 }
3113
3114 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3115 queue_group = &ctrl_info->queue_groups[i];
3116 queue_group->oq_element_array = element_array;
3117 queue_group->oq_element_array_bus_addr =
3118 ctrl_info->queue_memory_base_dma_handle +
3119 (element_array - ctrl_info->queue_memory_base);
3120 element_array += element_array_length_per_oq;
3121 element_array = PTR_ALIGN(element_array,
3122 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3123 }
3124
3125 ctrl_info->event_queue.oq_element_array = element_array;
3126 ctrl_info->event_queue.oq_element_array_bus_addr =
3127 ctrl_info->queue_memory_base_dma_handle +
3128 (element_array - ctrl_info->queue_memory_base);
3129 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3130 PQI_EVENT_OQ_ELEMENT_LENGTH;
3131
3132 next_queue_index = PTR_ALIGN(element_array,
3133 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3134
3135 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3136 queue_group = &ctrl_info->queue_groups[i];
3137 queue_group->iq_ci[RAID_PATH] = next_queue_index;
3138 queue_group->iq_ci_bus_addr[RAID_PATH] =
3139 ctrl_info->queue_memory_base_dma_handle +
3140 (next_queue_index - ctrl_info->queue_memory_base);
3141 next_queue_index += sizeof(pqi_index_t);
3142 next_queue_index = PTR_ALIGN(next_queue_index,
3143 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3144 queue_group->iq_ci[AIO_PATH] = next_queue_index;
3145 queue_group->iq_ci_bus_addr[AIO_PATH] =
3146 ctrl_info->queue_memory_base_dma_handle +
3147 (next_queue_index - ctrl_info->queue_memory_base);
3148 next_queue_index += sizeof(pqi_index_t);
3149 next_queue_index = PTR_ALIGN(next_queue_index,
3150 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3151 queue_group->oq_pi = next_queue_index;
3152 queue_group->oq_pi_bus_addr =
3153 ctrl_info->queue_memory_base_dma_handle +
3154 (next_queue_index - ctrl_info->queue_memory_base);
3155 next_queue_index += sizeof(pqi_index_t);
3156 next_queue_index = PTR_ALIGN(next_queue_index,
3157 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3158 }
3159
3160 ctrl_info->event_queue.oq_pi = next_queue_index;
3161 ctrl_info->event_queue.oq_pi_bus_addr =
3162 ctrl_info->queue_memory_base_dma_handle +
3163 (next_queue_index - ctrl_info->queue_memory_base);
3164
3165 return 0;
3166}
3167
3168static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
3169{
3170 unsigned int i;
3171 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3172 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3173
3174 /*
3175 * Initialize the backpointers to the controller structure in
3176 * each operational queue group structure.
3177 */
3178 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3179 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
3180
3181 /*
3182 * Assign IDs to all operational queues. Note that the IDs
3183 * assigned to operational IQs are independent of the IDs
3184 * assigned to operational OQs.
3185 */
3186 ctrl_info->event_queue.oq_id = next_oq_id++;
3187 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3188 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
3189 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
3190 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
3191 }
3192
3193 /*
3194 * Assign MSI-X table entry indexes to all queues. Note that the
3195 * interrupt for the event queue is shared with the first queue group.
3196 */
3197 ctrl_info->event_queue.int_msg_num = 0;
3198 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3199 ctrl_info->queue_groups[i].int_msg_num = i;
3200
3201 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3202 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
3203 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
3204 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
3205 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
3206 }
3207}
3208
3209static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
3210{
3211 size_t alloc_length;
3212 struct pqi_admin_queues_aligned *admin_queues_aligned;
3213 struct pqi_admin_queues *admin_queues;
3214
3215 alloc_length = sizeof(struct pqi_admin_queues_aligned) +
3216 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3217
3218 ctrl_info->admin_queue_memory_base =
3219 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3220 alloc_length,
3221 &ctrl_info->admin_queue_memory_base_dma_handle,
3222 GFP_KERNEL);
3223
3224 if (!ctrl_info->admin_queue_memory_base)
3225 return -ENOMEM;
3226
3227 ctrl_info->admin_queue_memory_length = alloc_length;
3228
3229 admin_queues = &ctrl_info->admin_queues;
3230 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
3231 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3232 admin_queues->iq_element_array =
3233 &admin_queues_aligned->iq_element_array;
3234 admin_queues->oq_element_array =
3235 &admin_queues_aligned->oq_element_array;
3236 admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
3237 admin_queues->oq_pi = &admin_queues_aligned->oq_pi;
3238
3239 admin_queues->iq_element_array_bus_addr =
3240 ctrl_info->admin_queue_memory_base_dma_handle +
3241 (admin_queues->iq_element_array -
3242 ctrl_info->admin_queue_memory_base);
3243 admin_queues->oq_element_array_bus_addr =
3244 ctrl_info->admin_queue_memory_base_dma_handle +
3245 (admin_queues->oq_element_array -
3246 ctrl_info->admin_queue_memory_base);
3247 admin_queues->iq_ci_bus_addr =
3248 ctrl_info->admin_queue_memory_base_dma_handle +
3249 ((void *)admin_queues->iq_ci -
3250 ctrl_info->admin_queue_memory_base);
3251 admin_queues->oq_pi_bus_addr =
3252 ctrl_info->admin_queue_memory_base_dma_handle +
3253 ((void *)admin_queues->oq_pi -
3254 ctrl_info->admin_queue_memory_base);
3255
3256 return 0;
3257}
3258
3259#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ
3260#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
3261
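/*
 * Admin queue pair creation: program the element array, CI and PI bus
 * addresses, pack the element counts and MSI-X message number into
 * admin_iq_num_elements, issue PQI_CREATE_ADMIN_QUEUE_PAIR, then poll the
 * function/status code until the controller reports IDLE.
 */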
3262static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
3263{
3264 struct pqi_device_registers __iomem *pqi_registers;
3265 struct pqi_admin_queues *admin_queues;
3266 unsigned long timeout;
3267 u8 status;
3268 u32 reg;
3269
3270 pqi_registers = ctrl_info->pqi_registers;
3271 admin_queues = &ctrl_info->admin_queues;
3272
3273 writeq((u64)admin_queues->iq_element_array_bus_addr,
3274 &pqi_registers->admin_iq_element_array_addr);
3275 writeq((u64)admin_queues->oq_element_array_bus_addr,
3276 &pqi_registers->admin_oq_element_array_addr);
3277 writeq((u64)admin_queues->iq_ci_bus_addr,
3278 &pqi_registers->admin_iq_ci_addr);
3279 writeq((u64)admin_queues->oq_pi_bus_addr,
3280 &pqi_registers->admin_oq_pi_addr);
3281
3282 reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
3283 (PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 |
3284 (admin_queues->int_msg_num << 16);
3285 writel(reg, &pqi_registers->admin_iq_num_elements);
3286 writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
3287 &pqi_registers->function_and_status_code);
3288
3289 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
3290 while (1) {
3291 status = readb(&pqi_registers->function_and_status_code);
3292 if (status == PQI_STATUS_IDLE)
3293 break;
3294 if (time_after(jiffies, timeout))
3295 return -ETIMEDOUT;
3296 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
3297 }
3298
3299 /*
3300 * The offset registers are not initialized to the correct
3301 * offsets until *after* the create admin queue pair command
3302 * completes successfully.
3303 */
3304 admin_queues->iq_pi = ctrl_info->iomem_base +
3305 PQI_DEVICE_REGISTERS_OFFSET +
3306 readq(&pqi_registers->admin_iq_pi_offset);
3307 admin_queues->oq_ci = ctrl_info->iomem_base +
3308 PQI_DEVICE_REGISTERS_OFFSET +
3309 readq(&pqi_registers->admin_oq_ci_offset);
3310
3311 return 0;
3312}
3313
3314static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
3315 struct pqi_general_admin_request *request)
3316{
3317 struct pqi_admin_queues *admin_queues;
3318 void *next_element;
3319 pqi_index_t iq_pi;
3320
3321 admin_queues = &ctrl_info->admin_queues;
3322 iq_pi = admin_queues->iq_pi_copy;
3323
3324 next_element = admin_queues->iq_element_array +
3325 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
3326
3327 memcpy(next_element, request, sizeof(*request));
3328
3329 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
3330 admin_queues->iq_pi_copy = iq_pi;
3331
3332 /*
3333 * This write notifies the controller that an IU is available to be
3334 * processed.
3335 */
3336 writel(iq_pi, admin_queues->iq_pi);
3337}
3338
3339static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
3340 struct pqi_general_admin_response *response)
3341{
3342 struct pqi_admin_queues *admin_queues;
3343 pqi_index_t oq_pi;
3344 pqi_index_t oq_ci;
3345 unsigned long timeout;
3346
3347 admin_queues = &ctrl_info->admin_queues;
3348 oq_ci = admin_queues->oq_ci_copy;
3349
3350 timeout = (3 * HZ) + jiffies;
3351
3352 while (1) {
3353 oq_pi = *admin_queues->oq_pi;
3354 if (oq_pi != oq_ci)
3355 break;
3356 if (time_after(jiffies, timeout)) {
3357 dev_err(&ctrl_info->pci_dev->dev,
3358 "timed out waiting for admin response\n");
3359 return -ETIMEDOUT;
3360 }
3361 usleep_range(1000, 2000);
3362 }
3363
3364 memcpy(response, admin_queues->oq_element_array +
3365 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
3366
3367 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
3368 admin_queues->oq_ci_copy = oq_ci;
3369 writel(oq_ci, admin_queues->oq_ci);
3370
3371 return 0;
3372}
3373
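/*
 * pqi_start_io() drains the per-path submit list under the submit lock:
 * each queued IU is copied into as many inbound queue elements as it spans,
 * wrapping around the end of the element array when necessary, and the
 * producer index doorbell is written once at the end. Requests that do not
 * currently fit remain on the list for a later call.
 */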
3374static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
3375 struct pqi_queue_group *queue_group, enum pqi_io_path path,
3376 struct pqi_io_request *io_request)
3377{
3378 struct pqi_io_request *next;
3379 void *next_element;
3380 pqi_index_t iq_pi;
3381 pqi_index_t iq_ci;
3382 size_t iu_length;
3383 unsigned long flags;
3384 unsigned int num_elements_needed;
3385 unsigned int num_elements_to_end_of_queue;
3386 size_t copy_count;
3387 struct pqi_iu_header *request;
3388
3389 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
3390
3391 if (io_request)
3392 list_add_tail(&io_request->request_list_entry,
3393 &queue_group->request_list[path]);
3394
3395 iq_pi = queue_group->iq_pi_copy[path];
3396
3397 list_for_each_entry_safe(io_request, next,
3398 &queue_group->request_list[path], request_list_entry) {
3399
3400 request = io_request->iu;
3401
3402 iu_length = get_unaligned_le16(&request->iu_length) +
3403 PQI_REQUEST_HEADER_LENGTH;
3404 num_elements_needed =
3405 DIV_ROUND_UP(iu_length,
3406 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3407
3408 iq_ci = *queue_group->iq_ci[path];
3409
3410 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
3411 ctrl_info->num_elements_per_iq))
3412 break;
3413
3414 put_unaligned_le16(queue_group->oq_id,
3415 &request->response_queue_id);
3416
3417 next_element = queue_group->iq_element_array[path] +
3418 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3419
3420 num_elements_to_end_of_queue =
3421 ctrl_info->num_elements_per_iq - iq_pi;
3422
3423 if (num_elements_needed <= num_elements_to_end_of_queue) {
3424 memcpy(next_element, request, iu_length);
3425 } else {
3426 copy_count = num_elements_to_end_of_queue *
3427 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
3428 memcpy(next_element, request, copy_count);
3429 memcpy(queue_group->iq_element_array[path],
3430 (u8 *)request + copy_count,
3431 iu_length - copy_count);
3432 }
3433
3434 iq_pi = (iq_pi + num_elements_needed) %
3435 ctrl_info->num_elements_per_iq;
3436
3437 list_del(&io_request->request_list_entry);
3438 }
3439
3440 if (iq_pi != queue_group->iq_pi_copy[path]) {
3441 queue_group->iq_pi_copy[path] = iq_pi;
3442 /*
3443 * This write notifies the controller that one or more IUs are
3444 * available to be processed.
3445 */
3446 writel(iq_pi, queue_group->iq_pi[path]);
3447 }
3448
3449 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
3450}
3451
3452static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
3453 void *context)
3454{
3455 struct completion *waiting = context;
3456
3457 complete(waiting);
3458}
3459
3460static int pqi_submit_raid_request_synchronous_with_io_request(
3461 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
3462 unsigned long timeout_msecs)
3463{
3464 int rc = 0;
3465 DECLARE_COMPLETION_ONSTACK(wait);
3466
3467 io_request->io_complete_callback = pqi_raid_synchronous_complete;
3468 io_request->context = &wait;
3469
3470 pqi_start_io(ctrl_info,
3471 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
3472 io_request);
3473
3474 if (timeout_msecs == NO_TIMEOUT) {
3475 wait_for_completion_io(&wait);
3476 } else {
3477 if (!wait_for_completion_io_timeout(&wait,
3478 msecs_to_jiffies(timeout_msecs))) {
3479 dev_warn(&ctrl_info->pci_dev->dev,
3480 "command timed out\n");
3481 rc = -ETIMEDOUT;
3482 }
3483 }
3484
3485 return rc;
3486}
3487
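/*
 * Synchronous RAID-path submission is throttled by sync_request_sem. When a
 * finite timeout is supplied, the time spent blocked on the semaphore and on
 * a blocked controller is subtracted from the budget before the request is
 * issued, so the caller's deadline covers the whole operation.
 */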
3488static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
3489 struct pqi_iu_header *request, unsigned int flags,
3490 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
3491{
3492 int rc;
3493 struct pqi_io_request *io_request;
3494 unsigned long start_jiffies;
3495 unsigned long msecs_blocked;
3496 size_t iu_length;
3497
3498 /*
3499 * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value
3500 * are mutually exclusive.
3501 */
3502
3503 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
3504 if (down_interruptible(&ctrl_info->sync_request_sem))
3505 return -ERESTARTSYS;
3506 } else {
3507 if (timeout_msecs == NO_TIMEOUT) {
3508 down(&ctrl_info->sync_request_sem);
3509 } else {
3510 start_jiffies = jiffies;
3511 if (down_timeout(&ctrl_info->sync_request_sem,
3512 msecs_to_jiffies(timeout_msecs)))
3513 return -ETIMEDOUT;
3514 msecs_blocked =
3515 jiffies_to_msecs(jiffies - start_jiffies);
3516 if (msecs_blocked >= timeout_msecs)
3517 return -ETIMEDOUT;
3518 timeout_msecs -= msecs_blocked;
3519 }
3520 }
3521
Kevin Barnett7561a7e2017-05-03 18:52:58 -05003522 pqi_ctrl_busy(ctrl_info);
3523 timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs);
3524 if (timeout_msecs == 0) {
3525 rc = -ETIMEDOUT;
3526 goto out;
3527 }
3528
Kevin Barnett6c223762016-06-27 16:41:00 -05003529 io_request = pqi_alloc_io_request(ctrl_info);
3530
3531 put_unaligned_le16(io_request->index,
3532 &(((struct pqi_raid_path_request *)request)->request_id));
3533
3534 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
3535 ((struct pqi_raid_path_request *)request)->error_index =
3536 ((struct pqi_raid_path_request *)request)->request_id;
3537
3538 iu_length = get_unaligned_le16(&request->iu_length) +
3539 PQI_REQUEST_HEADER_LENGTH;
3540 memcpy(io_request->iu, request, iu_length);
3541
3542 rc = pqi_submit_raid_request_synchronous_with_io_request(ctrl_info,
3543 io_request, timeout_msecs);
3544
3545 if (error_info) {
3546 if (io_request->error_info)
3547 memcpy(error_info, io_request->error_info,
3548 sizeof(*error_info));
3549 else
3550 memset(error_info, 0, sizeof(*error_info));
3551 } else if (rc == 0 && io_request->error_info) {
3552 u8 scsi_status;
3553 struct pqi_raid_error_info *raid_error_info;
3554
3555 raid_error_info = io_request->error_info;
3556 scsi_status = raid_error_info->status;
3557
3558 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
3559 raid_error_info->data_out_result ==
3560 PQI_DATA_IN_OUT_UNDERFLOW)
3561 scsi_status = SAM_STAT_GOOD;
3562
3563 if (scsi_status != SAM_STAT_GOOD)
3564 rc = -EIO;
3565 }
3566
3567 pqi_free_io_request(io_request);
3568
Kevin Barnett7561a7e2017-05-03 18:52:58 -05003569out:
3570 pqi_ctrl_unbusy(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05003571 up(&ctrl_info->sync_request_sem);
3572
3573 return rc;
3574}
3575
3576static int pqi_validate_admin_response(
3577 struct pqi_general_admin_response *response, u8 expected_function_code)
3578{
3579 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
3580 return -EINVAL;
3581
3582 if (get_unaligned_le16(&response->header.iu_length) !=
3583 PQI_GENERAL_ADMIN_IU_LENGTH)
3584 return -EINVAL;
3585
3586 if (response->function_code != expected_function_code)
3587 return -EINVAL;
3588
3589 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
3590 return -EINVAL;
3591
3592 return 0;
3593}
3594
3595static int pqi_submit_admin_request_synchronous(
3596 struct pqi_ctrl_info *ctrl_info,
3597 struct pqi_general_admin_request *request,
3598 struct pqi_general_admin_response *response)
3599{
3600 int rc;
3601
3602 pqi_submit_admin_request(ctrl_info, request);
3603
3604 rc = pqi_poll_for_admin_response(ctrl_info, response);
3605
3606 if (rc == 0)
3607 rc = pqi_validate_admin_response(response,
3608 request->function_code);
3609
3610 return rc;
3611}
3612
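/*
 * REPORT DEVICE CAPABILITY: the capability buffer is DMA-mapped for the
 * controller to fill in; note that the inbound/outbound element lengths
 * read back are multiplied by 16 below, i.e. the controller reports them
 * in units of 16 bytes.
 */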
3613static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
3614{
3615 int rc;
3616 struct pqi_general_admin_request request;
3617 struct pqi_general_admin_response response;
3618 struct pqi_device_capability *capability;
3619 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
3620
3621 capability = kmalloc(sizeof(*capability), GFP_KERNEL);
3622 if (!capability)
3623 return -ENOMEM;
3624
3625 memset(&request, 0, sizeof(request));
3626
3627 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3628 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3629 &request.header.iu_length);
3630 request.function_code =
3631 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
3632 put_unaligned_le32(sizeof(*capability),
3633 &request.data.report_device_capability.buffer_length);
3634
3635 rc = pqi_map_single(ctrl_info->pci_dev,
3636 &request.data.report_device_capability.sg_descriptor,
3637 capability, sizeof(*capability),
3638 PCI_DMA_FROMDEVICE);
3639 if (rc)
3640 goto out;
3641
3642 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3643 &response);
3644
3645 pqi_pci_unmap(ctrl_info->pci_dev,
3646 &request.data.report_device_capability.sg_descriptor, 1,
3647 PCI_DMA_FROMDEVICE);
3648
3649 if (rc)
3650 goto out;
3651
3652 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
3653 rc = -EIO;
3654 goto out;
3655 }
3656
3657 ctrl_info->max_inbound_queues =
3658 get_unaligned_le16(&capability->max_inbound_queues);
3659 ctrl_info->max_elements_per_iq =
3660 get_unaligned_le16(&capability->max_elements_per_iq);
3661 ctrl_info->max_iq_element_length =
3662 get_unaligned_le16(&capability->max_iq_element_length)
3663 * 16;
3664 ctrl_info->max_outbound_queues =
3665 get_unaligned_le16(&capability->max_outbound_queues);
3666 ctrl_info->max_elements_per_oq =
3667 get_unaligned_le16(&capability->max_elements_per_oq);
3668 ctrl_info->max_oq_element_length =
3669 get_unaligned_le16(&capability->max_oq_element_length)
3670 * 16;
3671
3672 sop_iu_layer_descriptor =
3673 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
3674
3675 ctrl_info->max_inbound_iu_length_per_firmware =
3676 get_unaligned_le16(
3677 &sop_iu_layer_descriptor->max_inbound_iu_length);
3678 ctrl_info->inbound_spanning_supported =
3679 sop_iu_layer_descriptor->inbound_spanning_supported;
3680 ctrl_info->outbound_spanning_supported =
3681 sop_iu_layer_descriptor->outbound_spanning_supported;
3682
3683out:
3684 kfree(capability);
3685
3686 return rc;
3687}
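/*
 * Note: the controller reports queue element lengths in units of
 * 16 bytes (hence the "* 16" conversions above).  The limits cached
 * here are checked by pqi_validate_device_capability() and used later
 * by pqi_calculate_queue_resources().
 */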
3688
3689static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
3690{
3691 if (ctrl_info->max_iq_element_length <
3692 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3693 dev_err(&ctrl_info->pci_dev->dev,
3694 "max. inbound queue element length of %d is less than the required length of %d\n",
3695 ctrl_info->max_iq_element_length,
3696 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3697 return -EINVAL;
3698 }
3699
3700 if (ctrl_info->max_oq_element_length <
3701 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
3702 dev_err(&ctrl_info->pci_dev->dev,
3703 "max. outbound queue element length of %d is less than the required length of %d\n",
3704 ctrl_info->max_oq_element_length,
3705 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
3706 return -EINVAL;
3707 }
3708
3709 if (ctrl_info->max_inbound_iu_length_per_firmware <
3710 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3711 dev_err(&ctrl_info->pci_dev->dev,
3712 "max. inbound IU length of %u is less than the min. required length of %d\n",
3713 ctrl_info->max_inbound_iu_length_per_firmware,
3714 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3715 return -EINVAL;
3716 }
3717
3718	if (!ctrl_info->inbound_spanning_supported) {
3719 dev_err(&ctrl_info->pci_dev->dev,
3720 "the controller does not support inbound spanning\n");
3721 return -EINVAL;
3722 }
3723
3724 if (ctrl_info->outbound_spanning_supported) {
3725 dev_err(&ctrl_info->pci_dev->dev,
3726 "the controller supports outbound spanning but this driver does not\n");
3727 return -EINVAL;
3728 }
3729
3730	return 0;
3731}
3732
3733static int pqi_delete_operational_queue(struct pqi_ctrl_info *ctrl_info,
3734 bool inbound_queue, u16 queue_id)
3735{
3736 struct pqi_general_admin_request request;
3737 struct pqi_general_admin_response response;
3738
3739 memset(&request, 0, sizeof(request));
3740 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3741 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3742 &request.header.iu_length);
3743 if (inbound_queue)
3744 request.function_code =
3745 PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ;
3746 else
3747 request.function_code =
3748 PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ;
3749 put_unaligned_le16(queue_id,
3750 &request.data.delete_operational_queue.queue_id);
3751
3752 return pqi_submit_admin_request_synchronous(ctrl_info, &request,
3753 &response);
3754}
3755
3756static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
3757{
3758 int rc;
3759 struct pqi_event_queue *event_queue;
3760 struct pqi_general_admin_request request;
3761 struct pqi_general_admin_response response;
3762
3763 event_queue = &ctrl_info->event_queue;
3764
3765 /*
3766	 * Create OQ (Outbound Queue - device to host queue) dedicated
3767	 * to events.
3768 */
3769 memset(&request, 0, sizeof(request));
3770 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3771 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3772 &request.header.iu_length);
3773 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
3774 put_unaligned_le16(event_queue->oq_id,
3775 &request.data.create_operational_oq.queue_id);
3776 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
3777 &request.data.create_operational_oq.element_array_addr);
3778 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
3779 &request.data.create_operational_oq.pi_addr);
3780 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
3781 &request.data.create_operational_oq.num_elements);
3782 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
3783 &request.data.create_operational_oq.element_length);
3784 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
3785 put_unaligned_le16(event_queue->int_msg_num,
3786 &request.data.create_operational_oq.int_msg_num);
3787
3788 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3789 &response);
3790 if (rc)
3791 return rc;
3792
3793 event_queue->oq_ci = ctrl_info->iomem_base +
3794 PQI_DEVICE_REGISTERS_OFFSET +
3795 get_unaligned_le64(
3796 &response.data.create_operational_oq.oq_ci_offset);
3797
3798 return 0;
3799}
3800
3801static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info)
3802{
3803 unsigned int i;
3804 int rc;
3805 struct pqi_queue_group *queue_group;
3806 struct pqi_general_admin_request request;
3807 struct pqi_general_admin_response response;
3808
3809 i = ctrl_info->num_active_queue_groups;
3810 queue_group = &ctrl_info->queue_groups[i];
3811
3812 /*
3813 * Create IQ (Inbound Queue - host to device queue) for
3814 * RAID path.
3815 */
3816 memset(&request, 0, sizeof(request));
3817 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3818 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3819 &request.header.iu_length);
3820 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
3821 put_unaligned_le16(queue_group->iq_id[RAID_PATH],
3822 &request.data.create_operational_iq.queue_id);
3823 put_unaligned_le64(
3824 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
3825 &request.data.create_operational_iq.element_array_addr);
3826 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
3827 &request.data.create_operational_iq.ci_addr);
3828 put_unaligned_le16(ctrl_info->num_elements_per_iq,
3829 &request.data.create_operational_iq.num_elements);
3830 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
3831 &request.data.create_operational_iq.element_length);
3832 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
3833
3834 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3835 &response);
3836 if (rc) {
3837 dev_err(&ctrl_info->pci_dev->dev,
3838 "error creating inbound RAID queue\n");
3839 return rc;
3840 }
3841
3842 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
3843 PQI_DEVICE_REGISTERS_OFFSET +
3844 get_unaligned_le64(
3845 &response.data.create_operational_iq.iq_pi_offset);
3846
3847 /*
3848 * Create IQ (Inbound Queue - host to device queue) for
3849 * Advanced I/O (AIO) path.
3850 */
3851 memset(&request, 0, sizeof(request));
3852 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3853 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3854 &request.header.iu_length);
3855 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
3856 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
3857 &request.data.create_operational_iq.queue_id);
3858 put_unaligned_le64((u64)queue_group->
3859 iq_element_array_bus_addr[AIO_PATH],
3860 &request.data.create_operational_iq.element_array_addr);
3861 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
3862 &request.data.create_operational_iq.ci_addr);
3863 put_unaligned_le16(ctrl_info->num_elements_per_iq,
3864 &request.data.create_operational_iq.num_elements);
3865 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
3866 &request.data.create_operational_iq.element_length);
3867 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
3868
3869 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3870 &response);
3871 if (rc) {
3872 dev_err(&ctrl_info->pci_dev->dev,
3873 "error creating inbound AIO queue\n");
3874 goto delete_inbound_queue_raid;
3875 }
3876
3877 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
3878 PQI_DEVICE_REGISTERS_OFFSET +
3879 get_unaligned_le64(
3880 &response.data.create_operational_iq.iq_pi_offset);
3881
3882 /*
3883 * Designate the 2nd IQ as the AIO path. By default, all IQs are
3884 * assumed to be for RAID path I/O unless we change the queue's
3885 * property.
3886 */
3887 memset(&request, 0, sizeof(request));
3888 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3889 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3890 &request.header.iu_length);
3891 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
3892 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
3893 &request.data.change_operational_iq_properties.queue_id);
3894 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
3895 &request.data.change_operational_iq_properties.vendor_specific);
3896
3897 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3898 &response);
3899 if (rc) {
3900 dev_err(&ctrl_info->pci_dev->dev,
3901 "error changing queue property\n");
3902 goto delete_inbound_queue_aio;
3903 }
3904
3905 /*
3906 * Create OQ (Outbound Queue - device to host queue).
3907 */
3908 memset(&request, 0, sizeof(request));
3909 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3910 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3911 &request.header.iu_length);
3912 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
3913 put_unaligned_le16(queue_group->oq_id,
3914 &request.data.create_operational_oq.queue_id);
3915 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
3916 &request.data.create_operational_oq.element_array_addr);
3917 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
3918 &request.data.create_operational_oq.pi_addr);
3919 put_unaligned_le16(ctrl_info->num_elements_per_oq,
3920 &request.data.create_operational_oq.num_elements);
3921 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
3922 &request.data.create_operational_oq.element_length);
3923 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
3924 put_unaligned_le16(queue_group->int_msg_num,
3925 &request.data.create_operational_oq.int_msg_num);
3926
3927 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3928 &response);
3929 if (rc) {
3930 dev_err(&ctrl_info->pci_dev->dev,
3931 "error creating outbound queue\n");
3932 goto delete_inbound_queue_aio;
3933 }
3934
3935 queue_group->oq_ci = ctrl_info->iomem_base +
3936 PQI_DEVICE_REGISTERS_OFFSET +
3937 get_unaligned_le64(
3938 &response.data.create_operational_oq.oq_ci_offset);
3939
3940 ctrl_info->num_active_queue_groups++;
3941
3942 return 0;
3943
3944delete_inbound_queue_aio:
3945 pqi_delete_operational_queue(ctrl_info, true,
3946 queue_group->iq_id[AIO_PATH]);
3947
3948delete_inbound_queue_raid:
3949 pqi_delete_operational_queue(ctrl_info, true,
3950 queue_group->iq_id[RAID_PATH]);
3951
3952 return rc;
3953}
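/*
 * Each queue group created above consists of two inbound queues (one
 * for the RAID path and one for the AIO path, the latter marked via
 * the CHANGE_IQ_PROPERTY request) plus a single outbound queue that
 * carries completions for both paths on one MSI-X vector.  On failure,
 * any inbound queue that was already created is deleted before the
 * error is returned.
 */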
3954
3955static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
3956{
3957 int rc;
3958 unsigned int i;
3959
3960 rc = pqi_create_event_queue(ctrl_info);
3961 if (rc) {
3962 dev_err(&ctrl_info->pci_dev->dev,
3963 "error creating event queue\n");
3964 return rc;
3965 }
3966
3967 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3968 rc = pqi_create_queue_group(ctrl_info);
3969 if (rc) {
3970 dev_err(&ctrl_info->pci_dev->dev,
3971 "error creating queue group number %u/%u\n",
3972 i, ctrl_info->num_queue_groups);
3973 return rc;
3974 }
3975 }
3976
3977 return 0;
3978}
3979
3980#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
3981 (offsetof(struct pqi_event_config, descriptors) + \
3982 (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
3983
3984static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
3985 bool enable_events)
3986{
3987 int rc;
3988 unsigned int i;
3989 struct pqi_event_config *event_config;
3990	struct pqi_event_descriptor *event_descriptor;
3991	struct pqi_general_management_request request;
3992
3993 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3994 GFP_KERNEL);
3995 if (!event_config)
3996 return -ENOMEM;
3997
3998 memset(&request, 0, sizeof(request));
3999
4000 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
4001 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4002 data.report_event_configuration.sg_descriptors[1]) -
4003 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4004 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4005 &request.data.report_event_configuration.buffer_length);
4006
4007 rc = pqi_map_single(ctrl_info->pci_dev,
4008 request.data.report_event_configuration.sg_descriptors,
4009 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4010 PCI_DMA_FROMDEVICE);
4011 if (rc)
4012 goto out;
4013
4014 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
4015 0, NULL, NO_TIMEOUT);
4016
4017 pqi_pci_unmap(ctrl_info->pci_dev,
4018 request.data.report_event_configuration.sg_descriptors, 1,
4019 PCI_DMA_FROMDEVICE);
4020
4021 if (rc)
4022 goto out;
4023
4024	for (i = 0; i < event_config->num_event_descriptors; i++) {
4025 event_descriptor = &event_config->descriptors[i];
4026 if (enable_events &&
4027 pqi_is_supported_event(event_descriptor->event_type))
4028 put_unaligned_le16(ctrl_info->event_queue.oq_id,
4029 &event_descriptor->oq_id);
4030 else
4031 put_unaligned_le16(0, &event_descriptor->oq_id);
4032 }
4033
4034 memset(&request, 0, sizeof(request));
4035
4036 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
4037 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4038 data.report_event_configuration.sg_descriptors[1]) -
4039 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4040 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4041 &request.data.report_event_configuration.buffer_length);
4042
4043 rc = pqi_map_single(ctrl_info->pci_dev,
4044 request.data.report_event_configuration.sg_descriptors,
4045 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4046 PCI_DMA_TODEVICE);
4047 if (rc)
4048 goto out;
4049
4050 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
4051 NULL, NO_TIMEOUT);
4052
4053 pqi_pci_unmap(ctrl_info->pci_dev,
4054 request.data.report_event_configuration.sg_descriptors, 1,
4055 PCI_DMA_TODEVICE);
4056
4057out:
4058 kfree(event_config);
4059
4060 return rc;
4061}
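/*
 * Event configuration is a read-modify-write sequence: the current
 * event configuration is read from the controller, every supported
 * event type is pointed at the dedicated event queue (or at queue ID 0
 * when disabling events), and the modified table is written back.
 */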
4062
4063static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
4064{
4065 return pqi_configure_events(ctrl_info, true);
4066}
4067
4068static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info)
4069{
4070 return pqi_configure_events(ctrl_info, false);
4071}
4072
4073static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
4074{
4075 unsigned int i;
4076 struct device *dev;
4077 size_t sg_chain_buffer_length;
4078 struct pqi_io_request *io_request;
4079
4080 if (!ctrl_info->io_request_pool)
4081 return;
4082
4083 dev = &ctrl_info->pci_dev->dev;
4084 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4085 io_request = ctrl_info->io_request_pool;
4086
4087 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4088 kfree(io_request->iu);
4089 if (!io_request->sg_chain_buffer)
4090 break;
4091 dma_free_coherent(dev, sg_chain_buffer_length,
4092 io_request->sg_chain_buffer,
4093 io_request->sg_chain_buffer_dma_handle);
4094 io_request++;
4095 }
4096
4097 kfree(ctrl_info->io_request_pool);
4098 ctrl_info->io_request_pool = NULL;
4099}
4100
4101static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
4102{
4103 ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
4104 ctrl_info->error_buffer_length,
4105 &ctrl_info->error_buffer_dma_handle, GFP_KERNEL);
4106
4107 if (!ctrl_info->error_buffer)
4108 return -ENOMEM;
4109
4110 return 0;
4111}
4112
4113static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
4114{
4115 unsigned int i;
4116 void *sg_chain_buffer;
4117 size_t sg_chain_buffer_length;
4118 dma_addr_t sg_chain_buffer_dma_handle;
4119 struct device *dev;
4120 struct pqi_io_request *io_request;
4121
4122 ctrl_info->io_request_pool = kzalloc(ctrl_info->max_io_slots *
4123 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
4124
4125 if (!ctrl_info->io_request_pool) {
4126 dev_err(&ctrl_info->pci_dev->dev,
4127 "failed to allocate I/O request pool\n");
4128 goto error;
4129 }
4130
4131 dev = &ctrl_info->pci_dev->dev;
4132 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4133 io_request = ctrl_info->io_request_pool;
4134
4135 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4136 io_request->iu =
4137 kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
4138
4139 if (!io_request->iu) {
4140 dev_err(&ctrl_info->pci_dev->dev,
4141 "failed to allocate IU buffers\n");
4142 goto error;
4143 }
4144
4145 sg_chain_buffer = dma_alloc_coherent(dev,
4146 sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
4147 GFP_KERNEL);
4148
4149 if (!sg_chain_buffer) {
4150 dev_err(&ctrl_info->pci_dev->dev,
4151 "failed to allocate PQI scatter-gather chain buffers\n");
4152 goto error;
4153 }
4154
4155 io_request->index = i;
4156 io_request->sg_chain_buffer = sg_chain_buffer;
4157 io_request->sg_chain_buffer_dma_handle =
4158 sg_chain_buffer_dma_handle;
4159 io_request++;
4160 }
4161
4162 return 0;
4163
4164error:
4165 pqi_free_all_io_requests(ctrl_info);
4166
4167 return -ENOMEM;
4168}
4169
4170/*
4171 * Calculate required resources that are sized based on max. outstanding
4172 * requests and max. transfer size.
4173 */
4174
4175static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
4176{
4177 u32 max_transfer_size;
4178 u32 max_sg_entries;
4179
4180 ctrl_info->scsi_ml_can_queue =
4181 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
4182 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
4183
4184 ctrl_info->error_buffer_length =
4185 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
4186
4187 max_transfer_size =
4188 min(ctrl_info->max_transfer_size, PQI_MAX_TRANSFER_SIZE);
4189
4190 max_sg_entries = max_transfer_size / PAGE_SIZE;
4191
4192 /* +1 to cover when the buffer is not page-aligned. */
4193 max_sg_entries++;
4194
4195 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
4196
4197 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
4198
4199 ctrl_info->sg_chain_buffer_length =
4200 max_sg_entries * sizeof(struct pqi_sg_descriptor);
4201 ctrl_info->sg_tablesize = max_sg_entries;
4202 ctrl_info->max_sectors = max_transfer_size / 512;
4203}
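/*
 * Worked example for the sizing above, assuming 4 KiB pages, a
 * controller that reports max_transfer_size = 1 MiB and at least 257
 * SG entries, and a PQI_MAX_TRANSFER_SIZE that does not clamp it
 * further: max_sg_entries = (1 MiB / 4 KiB) + 1 = 257, the transfer
 * size is then rounded back to (257 - 1) * 4 KiB = 1 MiB,
 * sg_chain_buffer_length = 257 * sizeof(struct pqi_sg_descriptor),
 * sg_tablesize = 257, and max_sectors = 1 MiB / 512 = 2048.
 */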
4204
4205static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
4206{
4207 int num_cpus;
4208 int max_queue_groups;
4209 int num_queue_groups;
4210 u16 num_elements_per_iq;
4211 u16 num_elements_per_oq;
4212
4213 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
4214 ctrl_info->max_outbound_queues - 1);
4215 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
4216
4217 num_cpus = num_online_cpus();
4218 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
4219 num_queue_groups = min(num_queue_groups, max_queue_groups);
4220
4221 ctrl_info->num_queue_groups = num_queue_groups;
4222
4223	/*
4224 * Make sure that the max. inbound IU length is an even multiple
4225 * of our inbound element length.
4226 */
4227 ctrl_info->max_inbound_iu_length =
4228 (ctrl_info->max_inbound_iu_length_per_firmware /
4229 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
4230 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4231
4232 num_elements_per_iq =
4233 (ctrl_info->max_inbound_iu_length /
4234 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4235
4236 /* Add one because one element in each queue is unusable. */
4237 num_elements_per_iq++;
4238
4239 num_elements_per_iq = min(num_elements_per_iq,
4240 ctrl_info->max_elements_per_iq);
4241
4242 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
4243 num_elements_per_oq = min(num_elements_per_oq,
4244 ctrl_info->max_elements_per_oq);
4245
4246 ctrl_info->num_elements_per_iq = num_elements_per_iq;
4247 ctrl_info->num_elements_per_oq = num_elements_per_oq;
4248
4249 ctrl_info->max_sg_per_iu =
4250 ((ctrl_info->max_inbound_iu_length -
4251 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
4252 sizeof(struct pqi_sg_descriptor)) +
4253 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
4254}
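/*
 * Queue depth bookkeeping above: the rounded-down maximum inbound IU
 * length divided by the inbound element length is the number of queue
 * elements one fully spanned IU can occupy, plus the one element that
 * must always remain empty; each outbound queue is sized at roughly
 * twice the inbound depth ((n - 1) * 2 + 1), presumably because it
 * receives completions for both inbound queues of its group; and
 * max_sg_per_iu is the number of SG descriptors that fit in the
 * spanned portion of an IU plus those embedded in the first element.
 */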
4255
4256static inline void pqi_set_sg_descriptor(
4257 struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
4258{
4259 u64 address = (u64)sg_dma_address(sg);
4260 unsigned int length = sg_dma_len(sg);
4261
4262 put_unaligned_le64(address, &sg_descriptor->address);
4263 put_unaligned_le32(length, &sg_descriptor->length);
4264 put_unaligned_le32(0, &sg_descriptor->flags);
4265}
4266
4267static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
4268 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
4269 struct pqi_io_request *io_request)
4270{
4271 int i;
4272 u16 iu_length;
4273 int sg_count;
4274 bool chained;
4275 unsigned int num_sg_in_iu;
4276 unsigned int max_sg_per_iu;
4277 struct scatterlist *sg;
4278 struct pqi_sg_descriptor *sg_descriptor;
4279
4280 sg_count = scsi_dma_map(scmd);
4281 if (sg_count < 0)
4282 return sg_count;
4283
4284 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
4285 PQI_REQUEST_HEADER_LENGTH;
4286
4287 if (sg_count == 0)
4288 goto out;
4289
4290 sg = scsi_sglist(scmd);
4291 sg_descriptor = request->sg_descriptors;
4292 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4293 chained = false;
4294 num_sg_in_iu = 0;
4295 i = 0;
4296
4297 while (1) {
4298 pqi_set_sg_descriptor(sg_descriptor, sg);
4299 if (!chained)
4300 num_sg_in_iu++;
4301 i++;
4302 if (i == sg_count)
4303 break;
4304 sg_descriptor++;
4305 if (i == max_sg_per_iu) {
4306 put_unaligned_le64(
4307 (u64)io_request->sg_chain_buffer_dma_handle,
4308 &sg_descriptor->address);
4309 put_unaligned_le32((sg_count - num_sg_in_iu)
4310 * sizeof(*sg_descriptor),
4311 &sg_descriptor->length);
4312 put_unaligned_le32(CISS_SG_CHAIN,
4313 &sg_descriptor->flags);
4314 chained = true;
4315 num_sg_in_iu++;
4316 sg_descriptor = io_request->sg_chain_buffer;
4317 }
4318 sg = sg_next(sg);
4319 }
4320
4321 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4322 request->partial = chained;
4323 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4324
4325out:
4326 put_unaligned_le16(iu_length, &request->header.iu_length);
4327
4328 return 0;
4329}
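/*
 * SG list construction above: up to ctrl_info->max_sg_per_iu - 1 data
 * descriptors are embedded directly in the IU.  When a command needs
 * more, the next embedded slot is turned into a CISS_SG_CHAIN
 * descriptor pointing at the request's preallocated DMA-coherent chain
 * buffer and the remaining entries are written there.  The final
 * descriptor is always flagged CISS_SG_LAST, and request->partial
 * records whether chaining was used.
 */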
4330
4331static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
4332 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
4333 struct pqi_io_request *io_request)
4334{
4335 int i;
4336 u16 iu_length;
4337 int sg_count;
4338	bool chained;
4339 unsigned int num_sg_in_iu;
4340 unsigned int max_sg_per_iu;
4341	struct scatterlist *sg;
4342 struct pqi_sg_descriptor *sg_descriptor;
4343
4344 sg_count = scsi_dma_map(scmd);
4345 if (sg_count < 0)
4346 return sg_count;
4347
4348 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
4349 PQI_REQUEST_HEADER_LENGTH;
4350 num_sg_in_iu = 0;
4351
4352	if (sg_count == 0)
4353 goto out;
4354
4355	sg = scsi_sglist(scmd);
4356 sg_descriptor = request->sg_descriptors;
4357 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4358 chained = false;
4359 i = 0;
4360
4361	while (1) {
4362 pqi_set_sg_descriptor(sg_descriptor, sg);
4363 if (!chained)
4364 num_sg_in_iu++;
4365 i++;
4366 if (i == sg_count)
4367 break;
4368 sg_descriptor++;
4369 if (i == max_sg_per_iu) {
4370 put_unaligned_le64(
4371 (u64)io_request->sg_chain_buffer_dma_handle,
4372 &sg_descriptor->address);
4373 put_unaligned_le32((sg_count - num_sg_in_iu)
4374 * sizeof(*sg_descriptor),
4375 &sg_descriptor->length);
4376 put_unaligned_le32(CISS_SG_CHAIN,
4377 &sg_descriptor->flags);
4378 chained = true;
4379 num_sg_in_iu++;
4380 sg_descriptor = io_request->sg_chain_buffer;
4381		}
4382		sg = sg_next(sg);
4383	}
4384
4385	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4386 request->partial = chained;
4387	iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4388
4389out:
4390	put_unaligned_le16(iu_length, &request->header.iu_length);
4391 request->num_sg_descriptors = num_sg_in_iu;
4392
4393 return 0;
4394}
4395
4396static void pqi_raid_io_complete(struct pqi_io_request *io_request,
4397 void *context)
4398{
4399 struct scsi_cmnd *scmd;
4400
4401 scmd = io_request->scmd;
4402 pqi_free_io_request(io_request);
4403 scsi_dma_unmap(scmd);
4404 pqi_scsi_done(scmd);
4405}
4406
4407static int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4408 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4409 struct pqi_queue_group *queue_group)
4410{
4411 int rc;
4412 size_t cdb_length;
4413 struct pqi_io_request *io_request;
4414 struct pqi_raid_path_request *request;
4415
4416 io_request = pqi_alloc_io_request(ctrl_info);
4417 io_request->io_complete_callback = pqi_raid_io_complete;
4418 io_request->scmd = scmd;
4419
4420 scmd->host_scribble = (unsigned char *)io_request;
4421
4422 request = io_request->iu;
4423 memset(request, 0,
4424 offsetof(struct pqi_raid_path_request, sg_descriptors));
4425
4426 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
4427 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4428 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4429 put_unaligned_le16(io_request->index, &request->request_id);
4430 request->error_index = request->request_id;
4431 memcpy(request->lun_number, device->scsi3addr,
4432 sizeof(request->lun_number));
4433
4434 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
4435 memcpy(request->cdb, scmd->cmnd, cdb_length);
4436
4437 switch (cdb_length) {
4438 case 6:
4439 case 10:
4440 case 12:
4441 case 16:
4442 /* No bytes in the Additional CDB bytes field */
4443 request->additional_cdb_bytes_usage =
4444 SOP_ADDITIONAL_CDB_BYTES_0;
4445 break;
4446 case 20:
4447 /* 4 bytes in the Additional cdb field */
4448 request->additional_cdb_bytes_usage =
4449 SOP_ADDITIONAL_CDB_BYTES_4;
4450 break;
4451 case 24:
4452 /* 8 bytes in the Additional cdb field */
4453 request->additional_cdb_bytes_usage =
4454 SOP_ADDITIONAL_CDB_BYTES_8;
4455 break;
4456 case 28:
4457 /* 12 bytes in the Additional cdb field */
4458 request->additional_cdb_bytes_usage =
4459 SOP_ADDITIONAL_CDB_BYTES_12;
4460 break;
4461 case 32:
4462 default:
4463 /* 16 bytes in the Additional cdb field */
4464 request->additional_cdb_bytes_usage =
4465 SOP_ADDITIONAL_CDB_BYTES_16;
4466 break;
4467 }
4468
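	/*
	 * The SOP direction flags below appear to be named from the
	 * controller's point of view: a host write (DMA_TO_DEVICE) is a
	 * read by the controller, and vice versa.
	 */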
4469 switch (scmd->sc_data_direction) {
4470 case DMA_TO_DEVICE:
4471 request->data_direction = SOP_READ_FLAG;
4472 break;
4473 case DMA_FROM_DEVICE:
4474 request->data_direction = SOP_WRITE_FLAG;
4475 break;
4476 case DMA_NONE:
4477 request->data_direction = SOP_NO_DIRECTION_FLAG;
4478 break;
4479 case DMA_BIDIRECTIONAL:
4480 request->data_direction = SOP_BIDIRECTIONAL;
4481 break;
4482 default:
4483 dev_err(&ctrl_info->pci_dev->dev,
4484 "unknown data direction: %d\n",
4485 scmd->sc_data_direction);
4486 WARN_ON(scmd->sc_data_direction);
4487 break;
4488 }
4489
4490 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
4491 if (rc) {
4492 pqi_free_io_request(io_request);
4493 return SCSI_MLQUEUE_HOST_BUSY;
4494 }
4495
4496 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
4497
4498 return 0;
4499}
4500
4501static void pqi_aio_io_complete(struct pqi_io_request *io_request,
4502 void *context)
4503{
4504 struct scsi_cmnd *scmd;
4505
4506 scmd = io_request->scmd;
4507 scsi_dma_unmap(scmd);
4508 if (io_request->status == -EAGAIN)
4509 set_host_byte(scmd, DID_IMM_RETRY);
4510 pqi_free_io_request(io_request);
4511 pqi_scsi_done(scmd);
4512}
4513
4514static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4515 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4516 struct pqi_queue_group *queue_group)
4517{
4518 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
4519 scmd->cmnd, scmd->cmd_len, queue_group, NULL);
4520}
4521
4522static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
4523 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
4524 unsigned int cdb_length, struct pqi_queue_group *queue_group,
4525 struct pqi_encryption_info *encryption_info)
4526{
4527 int rc;
4528 struct pqi_io_request *io_request;
4529 struct pqi_aio_path_request *request;
4530
4531 io_request = pqi_alloc_io_request(ctrl_info);
4532 io_request->io_complete_callback = pqi_aio_io_complete;
4533 io_request->scmd = scmd;
4534
4535 scmd->host_scribble = (unsigned char *)io_request;
4536
4537 request = io_request->iu;
4538 memset(request, 0,
4539 offsetof(struct pqi_raid_path_request, sg_descriptors));
4540
4541 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
4542 put_unaligned_le32(aio_handle, &request->nexus_id);
4543 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4544 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4545 put_unaligned_le16(io_request->index, &request->request_id);
4546 request->error_index = request->request_id;
4547 if (cdb_length > sizeof(request->cdb))
4548 cdb_length = sizeof(request->cdb);
4549 request->cdb_length = cdb_length;
4550 memcpy(request->cdb, cdb, cdb_length);
4551
4552 switch (scmd->sc_data_direction) {
4553 case DMA_TO_DEVICE:
4554 request->data_direction = SOP_READ_FLAG;
4555 break;
4556 case DMA_FROM_DEVICE:
4557 request->data_direction = SOP_WRITE_FLAG;
4558 break;
4559 case DMA_NONE:
4560 request->data_direction = SOP_NO_DIRECTION_FLAG;
4561 break;
4562 case DMA_BIDIRECTIONAL:
4563 request->data_direction = SOP_BIDIRECTIONAL;
4564 break;
4565 default:
4566 dev_err(&ctrl_info->pci_dev->dev,
4567 "unknown data direction: %d\n",
4568 scmd->sc_data_direction);
4569 WARN_ON(scmd->sc_data_direction);
4570 break;
4571 }
4572
4573 if (encryption_info) {
4574 request->encryption_enable = true;
4575 put_unaligned_le16(encryption_info->data_encryption_key_index,
4576 &request->data_encryption_key_index);
4577 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
4578 &request->encrypt_tweak_lower);
4579 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
4580 &request->encrypt_tweak_upper);
4581 }
4582
4583 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
4584 if (rc) {
4585 pqi_free_io_request(io_request);
4586 return SCSI_MLQUEUE_HOST_BUSY;
4587 }
4588
4589 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
4590
4591 return 0;
4592}
4593
4594/*
4595 * This function gets called just before we hand the completed SCSI request
4596 * back to the SML.
4597 */
4598
4599void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
4600{
4601 struct pqi_scsi_dev *device;
4602
4603 device = scmd->device->hostdata;
4604 atomic_dec(&device->scsi_cmds_outstanding);
4605}
4606
4607static int pqi_scsi_queue_command(struct Scsi_Host *shost,
4608	struct scsi_cmnd *scmd)
4609{
4610 int rc;
4611 struct pqi_ctrl_info *ctrl_info;
4612 struct pqi_scsi_dev *device;
4613 u16 hwq;
4614 struct pqi_queue_group *queue_group;
4615 bool raid_bypassed;
4616
4617 device = scmd->device->hostdata;
4618	ctrl_info = shost_to_hba(shost);
4619
4620	atomic_inc(&device->scsi_cmds_outstanding);
4621
4622	if (pqi_ctrl_offline(ctrl_info)) {
4623 set_host_byte(scmd, DID_NO_CONNECT);
4624 pqi_scsi_done(scmd);
4625 return 0;
4626 }
4627
4628	pqi_ctrl_busy(ctrl_info);
4629 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device)) {
4630 rc = SCSI_MLQUEUE_HOST_BUSY;
4631 goto out;
4632 }
4633
4634	/*
4635 * This is necessary because the SML doesn't zero out this field during
4636 * error recovery.
4637 */
4638 scmd->result = 0;
4639
4640	hwq = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
4641 if (hwq >= ctrl_info->num_queue_groups)
4642 hwq = 0;
4643
4644 queue_group = &ctrl_info->queue_groups[hwq];
4645
4646 if (pqi_is_logical_device(device)) {
4647 raid_bypassed = false;
4648 if (device->offload_enabled &&
4649			!blk_rq_is_passthrough(scmd->request)) {
4650			rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
4651 scmd, queue_group);
4652 if (rc == 0 ||
4653 rc == SCSI_MLQUEUE_HOST_BUSY ||
4654 rc == SAM_STAT_CHECK_CONDITION ||
4655 rc == SAM_STAT_RESERVATION_CONFLICT)
4656 raid_bypassed = true;
4657 }
4658 if (!raid_bypassed)
4659 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
4660 queue_group);
4661 } else {
4662 if (device->aio_enabled)
4663 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
4664 queue_group);
4665 else
4666 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
4667 queue_group);
4668 }
4669
4670out:
4671 pqi_ctrl_unbusy(ctrl_info);
4672 if (rc)
4673 atomic_dec(&device->scsi_cmds_outstanding);
4674
4675	return rc;
4676}
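/*
 * queuecommand dispatch above, in brief: the per-device outstanding
 * count is bumped first, an offline controller fails the command with
 * DID_NO_CONNECT, a blocked controller or a device under reset returns
 * SCSI_MLQUEUE_HOST_BUSY, the queue group is derived from the blk-mq
 * hardware queue of the request, and logical volumes with offload
 * enabled are first tried on the RAID bypass (AIO) path before falling
 * back to the normal RAID path.
 */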
4677
4678static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info,
4679 struct pqi_queue_group *queue_group)
4680{
4681 unsigned int path;
4682 unsigned long flags;
4683 bool list_is_empty;
4684
4685 for (path = 0; path < 2; path++) {
4686 while (1) {
4687 spin_lock_irqsave(
4688 &queue_group->submit_lock[path], flags);
4689 list_is_empty =
4690 list_empty(&queue_group->request_list[path]);
4691 spin_unlock_irqrestore(
4692 &queue_group->submit_lock[path], flags);
4693 if (list_is_empty)
4694 break;
4695 pqi_check_ctrl_health(ctrl_info);
4696 if (pqi_ctrl_offline(ctrl_info))
4697 return -ENXIO;
4698 usleep_range(1000, 2000);
4699 }
4700 }
4701
4702 return 0;
4703}
4704
4705static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
4706{
4707 int rc;
4708 unsigned int i;
4709 unsigned int path;
4710 struct pqi_queue_group *queue_group;
4711 pqi_index_t iq_pi;
4712 pqi_index_t iq_ci;
4713
4714 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4715 queue_group = &ctrl_info->queue_groups[i];
4716
4717 rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group);
4718 if (rc)
4719 return rc;
4720
4721 for (path = 0; path < 2; path++) {
4722 iq_pi = queue_group->iq_pi_copy[path];
4723
4724 while (1) {
4725 iq_ci = *queue_group->iq_ci[path];
4726 if (iq_ci == iq_pi)
4727 break;
4728 pqi_check_ctrl_health(ctrl_info);
4729 if (pqi_ctrl_offline(ctrl_info))
4730 return -ENXIO;
4731 usleep_range(1000, 2000);
4732 }
4733 }
4734 }
4735
4736 return 0;
4737}
4738
4739static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
4740 struct pqi_scsi_dev *device)
4741{
4742 unsigned int i;
4743 unsigned int path;
4744 struct pqi_queue_group *queue_group;
4745 unsigned long flags;
4746 struct pqi_io_request *io_request;
4747 struct pqi_io_request *next;
4748 struct scsi_cmnd *scmd;
4749 struct pqi_scsi_dev *scsi_device;
4750
4751 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4752 queue_group = &ctrl_info->queue_groups[i];
4753
4754 for (path = 0; path < 2; path++) {
4755 spin_lock_irqsave(
4756 &queue_group->submit_lock[path], flags);
4757
4758 list_for_each_entry_safe(io_request, next,
4759 &queue_group->request_list[path],
4760 request_list_entry) {
4761 scmd = io_request->scmd;
4762 if (!scmd)
4763 continue;
4764
4765 scsi_device = scmd->device->hostdata;
4766 if (scsi_device != device)
4767 continue;
4768
4769 list_del(&io_request->request_list_entry);
4770 set_host_byte(scmd, DID_RESET);
4771 pqi_scsi_done(scmd);
4772 }
4773
4774 spin_unlock_irqrestore(
4775 &queue_group->submit_lock[path], flags);
4776 }
4777 }
4778}
4779
4780static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
4781	void *context)
4782{
4783 struct completion *waiting = context;
4784
4785 complete(waiting);
4786}
4787
4788#define PQI_LUN_RESET_TIMEOUT_SECS	10
4789
4790static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
4791 struct pqi_scsi_dev *device, struct completion *wait)
4792{
4793 int rc;
4794
4795 while (1) {
4796 if (wait_for_completion_io_timeout(wait,
4797 PQI_LUN_RESET_TIMEOUT_SECS * HZ)) {
4798 rc = 0;
4799 break;
4800 }
4801
4802 pqi_check_ctrl_health(ctrl_info);
4803 if (pqi_ctrl_offline(ctrl_info)) {
4804 rc = -ETIMEDOUT;
4805 break;
4806 }
4807	}
4808
4809 return rc;
4810}
4811
4812static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
Kevin Barnett6c223762016-06-27 16:41:00 -05004813 struct pqi_scsi_dev *device)
4814{
4815 int rc;
4816 struct pqi_io_request *io_request;
4817 DECLARE_COMPLETION_ONSTACK(wait);
4818 struct pqi_task_management_request *request;
4819
4820	io_request = pqi_alloc_io_request(ctrl_info);
4821	io_request->io_complete_callback = pqi_lun_reset_complete;
4822	io_request->context = &wait;
4823
4824 request = io_request->iu;
4825 memset(request, 0, sizeof(*request));
4826
4827 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
4828 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
4829 &request->header.iu_length);
4830 put_unaligned_le16(io_request->index, &request->request_id);
4831 memcpy(request->lun_number, device->scsi3addr,
4832 sizeof(request->lun_number));
4833 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
4834
4835 pqi_start_io(ctrl_info,
4836 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
4837 io_request);
4838
4839	rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
4840 if (rc == 0)
4841		rc = io_request->status;
4842
4843 pqi_free_io_request(io_request);
4844
4845 return rc;
4846}
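/*
 * The LUN reset above is issued as a SOP task management IU on the
 * RAID path of the default queue group.  Completion is signalled from
 * pqi_lun_reset_complete(), and the wait loop re-arms in
 * PQI_LUN_RESET_TIMEOUT_SECS intervals so that a controller that goes
 * offline while the reset is outstanding is noticed and reported as
 * -ETIMEDOUT.
 */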
4847
4848/* Performs a reset at the LUN level. */
4849
4850static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
4851 struct pqi_scsi_dev *device)
4852{
4853 int rc;
4854
4855	rc = pqi_lun_reset(ctrl_info, device);
4856
4857	return rc == 0 ? SUCCESS : FAILED;
4858}
4859
4860static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
4861{
4862 int rc;
4863	struct Scsi_Host *shost;
4864	struct pqi_ctrl_info *ctrl_info;
4865 struct pqi_scsi_dev *device;
4866
4867	shost = scmd->device->host;
4868 ctrl_info = shost_to_hba(shost);
4869	device = scmd->device->hostdata;
4870
4871 dev_err(&ctrl_info->pci_dev->dev,
4872 "resetting scsi %d:%d:%d:%d\n",
4873		shost->host_no, device->bus, device->target, device->lun);
4874
4875	pqi_check_ctrl_health(ctrl_info);
4876 if (pqi_ctrl_offline(ctrl_info)) {
4877 rc = FAILED;
4878 goto out;
4879 }
4880
4881	mutex_lock(&ctrl_info->lun_reset_mutex);
4882
4883 pqi_ctrl_block_requests(ctrl_info);
4884 pqi_ctrl_wait_until_quiesced(ctrl_info);
4885 pqi_fail_io_queued_for_device(ctrl_info, device);
4886 rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
4887 pqi_device_reset_start(device);
4888 pqi_ctrl_unblock_requests(ctrl_info);
4889
4890 if (rc)
4891 rc = FAILED;
4892 else
4893 rc = pqi_device_reset(ctrl_info, device);
4894
4895 pqi_device_reset_done(device);
4896
4897 mutex_unlock(&ctrl_info->lun_reset_mutex);
4898
4899out:
4900	dev_err(&ctrl_info->pci_dev->dev,
4901 "reset of scsi %d:%d:%d:%d: %s\n",
4902		shost->host_no, device->bus, device->target, device->lun,
4903		rc == SUCCESS ? "SUCCESS" : "FAILED");
4904
4905 return rc;
4906}
4907
4908static int pqi_slave_alloc(struct scsi_device *sdev)
4909{
4910 struct pqi_scsi_dev *device;
4911 unsigned long flags;
4912 struct pqi_ctrl_info *ctrl_info;
4913 struct scsi_target *starget;
4914 struct sas_rphy *rphy;
4915
4916 ctrl_info = shost_to_hba(sdev->host);
4917
4918 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
4919
4920 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
4921 starget = scsi_target(sdev);
4922 rphy = target_to_rphy(starget);
4923 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
4924 if (device) {
4925 device->target = sdev_id(sdev);
4926 device->lun = sdev->lun;
4927 device->target_lun_valid = true;
4928 }
4929 } else {
4930 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
4931 sdev_id(sdev), sdev->lun);
4932 }
4933
4934 if (device && device->expose_device) {
4935 sdev->hostdata = device;
4936 device->sdev = sdev;
4937 if (device->queue_depth) {
4938 device->advertised_queue_depth = device->queue_depth;
4939 scsi_change_queue_depth(sdev,
4940 device->advertised_queue_depth);
4941 }
4942 }
4943
4944 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
4945
4946 return 0;
4947}
4948
4949static int pqi_slave_configure(struct scsi_device *sdev)
4950{
4951 struct pqi_scsi_dev *device;
4952
4953 device = sdev->hostdata;
4954 if (!device->expose_device)
4955 sdev->no_uld_attach = true;
4956
4957 return 0;
4958}
4959
4960static int pqi_map_queues(struct Scsi_Host *shost)
4961{
4962 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
4963
4964 return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev);
4965}
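/*
 * blk_mq_pci_map_queues() maps each blk-mq hardware queue onto the
 * CPUs in the affinity mask of the corresponding PCI MSI-X vector,
 * which keeps the hwq selection in pqi_scsi_queue_command() aligned
 * with the per-queue-group interrupt routing.
 */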
4966
4967static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
4968 void __user *arg)
4969{
4970 struct pci_dev *pci_dev;
4971 u32 subsystem_vendor;
4972 u32 subsystem_device;
4973 cciss_pci_info_struct pciinfo;
4974
4975 if (!arg)
4976 return -EINVAL;
4977
4978 pci_dev = ctrl_info->pci_dev;
4979
4980 pciinfo.domain = pci_domain_nr(pci_dev->bus);
4981 pciinfo.bus = pci_dev->bus->number;
4982 pciinfo.dev_fn = pci_dev->devfn;
4983 subsystem_vendor = pci_dev->subsystem_vendor;
4984 subsystem_device = pci_dev->subsystem_device;
4985 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
4986 subsystem_vendor;
4987
4988 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
4989 return -EFAULT;
4990
4991 return 0;
4992}
4993
4994static int pqi_getdrivver_ioctl(void __user *arg)
4995{
4996 u32 version;
4997
4998 if (!arg)
4999 return -EINVAL;
5000
5001 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
5002 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
5003
5004 if (copy_to_user(arg, &version, sizeof(version)))
5005 return -EFAULT;
5006
5007 return 0;
5008}
5009
5010struct ciss_error_info {
5011 u8 scsi_status;
5012 int command_status;
5013 size_t sense_data_length;
5014};
5015
5016static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
5017 struct ciss_error_info *ciss_error_info)
5018{
5019 int ciss_cmd_status;
5020 size_t sense_data_length;
5021
5022 switch (pqi_error_info->data_out_result) {
5023 case PQI_DATA_IN_OUT_GOOD:
5024 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
5025 break;
5026 case PQI_DATA_IN_OUT_UNDERFLOW:
5027 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
5028 break;
5029 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
5030 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
5031 break;
5032 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
5033 case PQI_DATA_IN_OUT_BUFFER_ERROR:
5034 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
5035 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
5036 case PQI_DATA_IN_OUT_ERROR:
5037 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
5038 break;
5039 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
5040 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
5041 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
5042 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
5043 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
5044 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
5045 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
5046 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
5047 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
5048 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
5049 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
5050 break;
5051 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
5052 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
5053 break;
5054 case PQI_DATA_IN_OUT_ABORTED:
5055 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
5056 break;
5057 case PQI_DATA_IN_OUT_TIMEOUT:
5058 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
5059 break;
5060 default:
5061 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
5062 break;
5063 }
5064
5065 sense_data_length =
5066 get_unaligned_le16(&pqi_error_info->sense_data_length);
5067 if (sense_data_length == 0)
5068 sense_data_length =
5069 get_unaligned_le16(&pqi_error_info->response_data_length);
5070 if (sense_data_length)
5071 if (sense_data_length > sizeof(pqi_error_info->data))
5072 sense_data_length = sizeof(pqi_error_info->data);
5073
5074 ciss_error_info->scsi_status = pqi_error_info->status;
5075 ciss_error_info->command_status = ciss_cmd_status;
5076 ciss_error_info->sense_data_length = sense_data_length;
5077}
5078
5079static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
5080{
5081 int rc;
5082 char *kernel_buffer = NULL;
5083 u16 iu_length;
5084 size_t sense_data_length;
5085 IOCTL_Command_struct iocommand;
5086 struct pqi_raid_path_request request;
5087 struct pqi_raid_error_info pqi_error_info;
5088 struct ciss_error_info ciss_error_info;
5089
5090 if (pqi_ctrl_offline(ctrl_info))
5091 return -ENXIO;
5092 if (!arg)
5093 return -EINVAL;
5094 if (!capable(CAP_SYS_RAWIO))
5095 return -EPERM;
5096 if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
5097 return -EFAULT;
5098 if (iocommand.buf_size < 1 &&
5099 iocommand.Request.Type.Direction != XFER_NONE)
5100 return -EINVAL;
5101 if (iocommand.Request.CDBLen > sizeof(request.cdb))
5102 return -EINVAL;
5103 if (iocommand.Request.Type.Type != TYPE_CMD)
5104 return -EINVAL;
5105
5106 switch (iocommand.Request.Type.Direction) {
5107 case XFER_NONE:
5108 case XFER_WRITE:
5109 case XFER_READ:
5110 break;
5111 default:
5112 return -EINVAL;
5113 }
5114
5115 if (iocommand.buf_size > 0) {
5116 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
5117 if (!kernel_buffer)
5118 return -ENOMEM;
5119 if (iocommand.Request.Type.Direction & XFER_WRITE) {
5120 if (copy_from_user(kernel_buffer, iocommand.buf,
5121 iocommand.buf_size)) {
5122 rc = -EFAULT;
5123 goto out;
5124 }
5125 } else {
5126 memset(kernel_buffer, 0, iocommand.buf_size);
5127 }
5128 }
5129
5130 memset(&request, 0, sizeof(request));
5131
5132 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
5133 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
5134 PQI_REQUEST_HEADER_LENGTH;
5135 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
5136 sizeof(request.lun_number));
5137 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
5138 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
5139
5140 switch (iocommand.Request.Type.Direction) {
5141 case XFER_NONE:
5142 request.data_direction = SOP_NO_DIRECTION_FLAG;
5143 break;
5144 case XFER_WRITE:
5145 request.data_direction = SOP_WRITE_FLAG;
5146 break;
5147 case XFER_READ:
5148 request.data_direction = SOP_READ_FLAG;
5149 break;
5150 }
5151
5152 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5153
5154 if (iocommand.buf_size > 0) {
5155 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
5156
5157 rc = pqi_map_single(ctrl_info->pci_dev,
5158 &request.sg_descriptors[0], kernel_buffer,
5159 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
5160 if (rc)
5161 goto out;
5162
5163 iu_length += sizeof(request.sg_descriptors[0]);
5164 }
5165
5166 put_unaligned_le16(iu_length, &request.header.iu_length);
5167
5168 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
5169 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
5170
5171 if (iocommand.buf_size > 0)
5172 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
5173 PCI_DMA_BIDIRECTIONAL);
5174
5175 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
5176
5177 if (rc == 0) {
5178 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
5179 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
5180 iocommand.error_info.CommandStatus =
5181 ciss_error_info.command_status;
5182 sense_data_length = ciss_error_info.sense_data_length;
5183 if (sense_data_length) {
5184 if (sense_data_length >
5185 sizeof(iocommand.error_info.SenseInfo))
5186 sense_data_length =
5187 sizeof(iocommand.error_info.SenseInfo);
5188 memcpy(iocommand.error_info.SenseInfo,
5189 pqi_error_info.data, sense_data_length);
5190 iocommand.error_info.SenseLen = sense_data_length;
5191 }
5192 }
5193
5194 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
5195 rc = -EFAULT;
5196 goto out;
5197 }
5198
5199 if (rc == 0 && iocommand.buf_size > 0 &&
5200 (iocommand.Request.Type.Direction & XFER_READ)) {
5201 if (copy_to_user(iocommand.buf, kernel_buffer,
5202 iocommand.buf_size)) {
5203 rc = -EFAULT;
5204 }
5205 }
5206
5207out:
5208 kfree(kernel_buffer);
5209
5210 return rc;
5211}
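/*
 * CCISS_PASSTHRU handling above: a single-buffer CISS command from a
 * CAP_SYS_RAWIO caller is bounced through a kernel allocation mapped
 * with one SG descriptor, submitted as an interruptible synchronous
 * RAID-path request, and any PQI error information is translated back
 * into the CISS error fields (command status, SCSI status, sense data)
 * expected by cciss-style user tools.
 */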
5212
5213static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
5214{
5215 int rc;
5216 struct pqi_ctrl_info *ctrl_info;
5217
5218 ctrl_info = shost_to_hba(sdev->host);
5219
5220 switch (cmd) {
5221 case CCISS_DEREGDISK:
5222 case CCISS_REGNEWDISK:
5223 case CCISS_REGNEWD:
5224 rc = pqi_scan_scsi_devices(ctrl_info);
5225 break;
5226 case CCISS_GETPCIINFO:
5227 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
5228 break;
5229 case CCISS_GETDRIVVER:
5230 rc = pqi_getdrivver_ioctl(arg);
5231 break;
5232 case CCISS_PASSTHRU:
5233 rc = pqi_passthru_ioctl(ctrl_info, arg);
5234 break;
5235 default:
5236 rc = -EINVAL;
5237 break;
5238 }
5239
5240 return rc;
5241}
5242
5243static ssize_t pqi_version_show(struct device *dev,
5244 struct device_attribute *attr, char *buffer)
5245{
5246 ssize_t count = 0;
5247 struct Scsi_Host *shost;
5248 struct pqi_ctrl_info *ctrl_info;
5249
5250 shost = class_to_shost(dev);
5251 ctrl_info = shost_to_hba(shost);
5252
5253 count += snprintf(buffer + count, PAGE_SIZE - count,
5254 " driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP);
5255
5256 count += snprintf(buffer + count, PAGE_SIZE - count,
5257 "firmware: %s\n", ctrl_info->firmware_version);
5258
5259 return count;
5260}
5261
5262static ssize_t pqi_host_rescan_store(struct device *dev,
5263 struct device_attribute *attr, const char *buffer, size_t count)
5264{
5265 struct Scsi_Host *shost = class_to_shost(dev);
5266
5267 pqi_scan_start(shost);
5268
5269 return count;
5270}
5271
5272static DEVICE_ATTR(version, S_IRUGO, pqi_version_show, NULL);
5273static DEVICE_ATTR(rescan, S_IWUSR, NULL, pqi_host_rescan_store);
5274
5275static struct device_attribute *pqi_shost_attrs[] = {
5276 &dev_attr_version,
5277 &dev_attr_rescan,
5278 NULL
5279};
5280
5281static ssize_t pqi_sas_address_show(struct device *dev,
5282 struct device_attribute *attr, char *buffer)
5283{
5284 struct pqi_ctrl_info *ctrl_info;
5285 struct scsi_device *sdev;
5286 struct pqi_scsi_dev *device;
5287 unsigned long flags;
5288 u64 sas_address;
5289
5290 sdev = to_scsi_device(dev);
5291 ctrl_info = shost_to_hba(sdev->host);
5292
5293 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5294
5295 device = sdev->hostdata;
5296 if (pqi_is_logical_device(device)) {
5297 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5298 flags);
5299 return -ENODEV;
5300 }
5301 sas_address = device->sas_address;
5302
5303 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5304
5305 return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
5306}
5307
5308static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
5309 struct device_attribute *attr, char *buffer)
5310{
5311 struct pqi_ctrl_info *ctrl_info;
5312 struct scsi_device *sdev;
5313 struct pqi_scsi_dev *device;
5314 unsigned long flags;
5315
5316 sdev = to_scsi_device(dev);
5317 ctrl_info = shost_to_hba(sdev->host);
5318
5319 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5320
5321 device = sdev->hostdata;
5322 buffer[0] = device->offload_enabled ? '1' : '0';
5323 buffer[1] = '\n';
5324 buffer[2] = '\0';
5325
5326 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5327
5328 return 2;
5329}
5330
5331static DEVICE_ATTR(sas_address, S_IRUGO, pqi_sas_address_show, NULL);
5332static DEVICE_ATTR(ssd_smart_path_enabled, S_IRUGO,
5333 pqi_ssd_smart_path_enabled_show, NULL);
5334
5335static struct device_attribute *pqi_sdev_attrs[] = {
5336 &dev_attr_sas_address,
5337 &dev_attr_ssd_smart_path_enabled,
5338 NULL
5339};
5340
5341static struct scsi_host_template pqi_driver_template = {
5342 .module = THIS_MODULE,
5343 .name = DRIVER_NAME_SHORT,
5344 .proc_name = DRIVER_NAME_SHORT,
5345 .queuecommand = pqi_scsi_queue_command,
5346 .scan_start = pqi_scan_start,
5347 .scan_finished = pqi_scan_finished,
5348 .this_id = -1,
5349 .use_clustering = ENABLE_CLUSTERING,
5350 .eh_device_reset_handler = pqi_eh_device_reset_handler,
5351 .ioctl = pqi_ioctl,
5352 .slave_alloc = pqi_slave_alloc,
5353 .slave_configure = pqi_slave_configure,
5354	.map_queues = pqi_map_queues,
5355	.sdev_attrs = pqi_sdev_attrs,
5356 .shost_attrs = pqi_shost_attrs,
5357};
5358
5359static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
5360{
5361 int rc;
5362 struct Scsi_Host *shost;
5363
5364 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
5365 if (!shost) {
5366 dev_err(&ctrl_info->pci_dev->dev,
5367 "scsi_host_alloc failed for controller %u\n",
5368 ctrl_info->ctrl_id);
5369 return -ENOMEM;
5370 }
5371
5372 shost->io_port = 0;
5373 shost->n_io_port = 0;
5374 shost->this_id = -1;
5375 shost->max_channel = PQI_MAX_BUS;
5376 shost->max_cmd_len = MAX_COMMAND_SIZE;
5377 shost->max_lun = ~0;
5378 shost->max_id = ~0;
5379 shost->max_sectors = ctrl_info->max_sectors;
5380 shost->can_queue = ctrl_info->scsi_ml_can_queue;
5381 shost->cmd_per_lun = shost->can_queue;
5382 shost->sg_tablesize = ctrl_info->sg_tablesize;
5383 shost->transportt = pqi_sas_transport_template;
5384 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
5385 shost->unique_id = shost->irq;
5386 shost->nr_hw_queues = ctrl_info->num_queue_groups;
5387 shost->hostdata[0] = (unsigned long)ctrl_info;
5388
5389 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
5390 if (rc) {
5391 dev_err(&ctrl_info->pci_dev->dev,
5392 "scsi_add_host failed for controller %u\n",
5393 ctrl_info->ctrl_id);
5394 goto free_host;
5395 }
5396
5397 rc = pqi_add_sas_host(shost, ctrl_info);
5398 if (rc) {
5399 dev_err(&ctrl_info->pci_dev->dev,
5400 "add SAS host failed for controller %u\n",
5401 ctrl_info->ctrl_id);
5402 goto remove_host;
5403 }
5404
5405 ctrl_info->scsi_host = shost;
5406
5407 return 0;
5408
5409remove_host:
5410 scsi_remove_host(shost);
5411free_host:
5412 scsi_host_put(shost);
5413
5414 return rc;
5415}
5416
5417static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
5418{
5419 struct Scsi_Host *shost;
5420
5421 pqi_delete_sas_host(ctrl_info);
5422
5423 shost = ctrl_info->scsi_host;
5424 if (!shost)
5425 return;
5426
5427 scsi_remove_host(shost);
5428 scsi_host_put(shost);
5429}
5430
5431#define PQI_RESET_ACTION_RESET 0x1
5432
5433#define PQI_RESET_TYPE_NO_RESET 0x0
5434#define PQI_RESET_TYPE_SOFT_RESET 0x1
5435#define PQI_RESET_TYPE_FIRM_RESET 0x2
5436#define PQI_RESET_TYPE_HARD_RESET 0x3
5437
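/*
 * Encoding of the value written to the PQI device_reset register in
 * pqi_reset() below: the reset action is shifted into the high-order
 * bits (bit 5 and up) and the reset type occupies the low-order bits,
 * so
 *
 *	(PQI_RESET_ACTION_RESET << 5) | PQI_RESET_TYPE_HARD_RESET
 *
 * requests a hard reset of the PQI device.
 */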
5438static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
5439{
5440 int rc;
5441 u32 reset_params;
5442
5443 reset_params = (PQI_RESET_ACTION_RESET << 5) |
5444 PQI_RESET_TYPE_HARD_RESET;
5445
5446 writel(reset_params,
5447 &ctrl_info->pqi_registers->device_reset);
5448
5449 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
5450 if (rc)
5451 dev_err(&ctrl_info->pci_dev->dev,
5452 "PQI reset failed\n");
5453
5454 return rc;
5455}
5456
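/*
 * Read the firmware version via BMIC IDENTIFY CONTROLLER and store it
 * as a NUL-terminated string with the firmware build number appended
 * ("<version>-<build>"); this is the string reported by the "version"
 * host attribute above.
 */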
5457static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
5458{
5459 int rc;
5460 struct bmic_identify_controller *identify;
5461
5462 identify = kmalloc(sizeof(*identify), GFP_KERNEL);
5463 if (!identify)
5464 return -ENOMEM;
5465
5466 rc = pqi_identify_controller(ctrl_info, identify);
5467 if (rc)
5468 goto out;
5469
5470 memcpy(ctrl_info->firmware_version, identify->firmware_version,
5471 sizeof(identify->firmware_version));
5472 ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
5473 snprintf(ctrl_info->firmware_version +
5474 strlen(ctrl_info->firmware_version),
5475 sizeof(ctrl_info->firmware_version) - strlen(ctrl_info->firmware_version),
5476 "-%u", get_unaligned_le16(&identify->firmware_build_number));
5477
5478out:
5479 kfree(identify);
5480
5481 return rc;
5482}
5483
5484/* Switches the controller from PQI mode back into SIS mode. */
5485
5486static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
5487{
5488 int rc;
5489
5490 sis_disable_msix(ctrl_info);
5491 rc = pqi_reset(ctrl_info);
5492 if (rc)
5493 return rc;
5494 sis_reenable_sis_mode(ctrl_info);
5495 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
5496
5497 return 0;
5498}
5499
5500/*
5501 * If the controller isn't already in SIS mode, this function forces it into
5502 * SIS mode.
5503 */
5504
5505static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
5506{
5507 if (!sis_is_firmware_running(ctrl_info))
5508 return -ENXIO;
5509
5510 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
5511 return 0;
5512
5513 if (sis_is_kernel_up(ctrl_info)) {
5514 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
5515 return 0;
5516 }
5517
5518 return pqi_revert_to_sis_mode(ctrl_info);
5519}
5520
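/*
 * Main controller bring-up path.  The sequence below is, roughly:
 * force the controller into SIS mode and wait until it accepts SIS
 * commands, read its properties and PQI capabilities, size the I/O
 * resources and allocate the error buffer, switch the controller into
 * PQI mode, create the admin queues, validate the reported device
 * capability, enable MSI-X, allocate the I/O and operational queues,
 * request IRQs, create the operational queues, enable event
 * notification, start the heartbeat timer, register with the SCSI
 * midlayer, record the firmware version, report the driver version to
 * the host-wellness interface, and kick off the initial device scan.
 */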
5521static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
5522{
5523 int rc;
5524
5525 rc = pqi_force_sis_mode(ctrl_info);
5526 if (rc)
5527 return rc;
5528
5529 /*
5530 * Wait until the controller is ready to start accepting SIS
5531 * commands.
5532 */
5533 rc = sis_wait_for_ctrl_ready(ctrl_info);
5534 if (rc) {
5535 dev_err(&ctrl_info->pci_dev->dev,
5536 "error initializing SIS interface\n");
5537 return rc;
5538 }
5539
5540 /*
5541 * Get the controller properties. This allows us to determine
5542 * whether or not it supports PQI mode.
5543 */
5544 rc = sis_get_ctrl_properties(ctrl_info);
5545 if (rc) {
5546 dev_err(&ctrl_info->pci_dev->dev,
5547 "error obtaining controller properties\n");
5548 return rc;
5549 }
5550
5551 rc = sis_get_pqi_capabilities(ctrl_info);
5552 if (rc) {
5553 dev_err(&ctrl_info->pci_dev->dev,
5554 "error obtaining controller capabilities\n");
5555 return rc;
5556 }
5557
5558 if (ctrl_info->max_outstanding_requests > PQI_MAX_OUTSTANDING_REQUESTS)
5559 ctrl_info->max_outstanding_requests =
5560 PQI_MAX_OUTSTANDING_REQUESTS;
5561
5562 pqi_calculate_io_resources(ctrl_info);
5563
5564 rc = pqi_alloc_error_buffer(ctrl_info);
5565 if (rc) {
5566 dev_err(&ctrl_info->pci_dev->dev,
5567 "failed to allocate PQI error buffer\n");
5568 return rc;
5569 }
5570
5571 /*
5572 * If the function we are about to call succeeds, the
5573 * controller will transition from legacy SIS mode
5574 * into PQI mode.
5575 */
5576 rc = sis_init_base_struct_addr(ctrl_info);
5577 if (rc) {
5578 dev_err(&ctrl_info->pci_dev->dev,
5579 "error initializing PQI mode\n");
5580 return rc;
5581 }
5582
5583 /* Wait for the controller to complete the SIS -> PQI transition. */
5584 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
5585 if (rc) {
5586 dev_err(&ctrl_info->pci_dev->dev,
5587 "transition to PQI mode failed\n");
5588 return rc;
5589 }
5590
5591 /* From here on, we are running in PQI mode. */
5592 ctrl_info->pqi_mode_enabled = true;
5593 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
5594
5595 rc = pqi_alloc_admin_queues(ctrl_info);
5596 if (rc) {
5597 dev_err(&ctrl_info->pci_dev->dev,
5598 "error allocating admin queues\n");
5599 return rc;
5600 }
5601
5602 rc = pqi_create_admin_queues(ctrl_info);
5603 if (rc) {
5604 dev_err(&ctrl_info->pci_dev->dev,
5605 "error creating admin queues\n");
5606 return rc;
5607 }
5608
5609 rc = pqi_report_device_capability(ctrl_info);
5610 if (rc) {
5611 dev_err(&ctrl_info->pci_dev->dev,
5612 "obtaining device capability failed\n");
5613 return rc;
5614 }
5615
5616 rc = pqi_validate_device_capability(ctrl_info);
5617 if (rc)
5618 return rc;
5619
5620 pqi_calculate_queue_resources(ctrl_info);
5621
5622 rc = pqi_enable_msix_interrupts(ctrl_info);
5623 if (rc)
5624 return rc;
5625
5626 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
5627 ctrl_info->max_msix_vectors =
5628 ctrl_info->num_msix_vectors_enabled;
5629 pqi_calculate_queue_resources(ctrl_info);
5630 }
5631
5632 rc = pqi_alloc_io_resources(ctrl_info);
5633 if (rc)
5634 return rc;
5635
5636 rc = pqi_alloc_operational_queues(ctrl_info);
5637 if (rc)
5638 return rc;
5639
5640 pqi_init_operational_queues(ctrl_info);
5641
5642 rc = pqi_request_irqs(ctrl_info);
5643 if (rc)
5644 return rc;
5645
5646 rc = pqi_create_queues(ctrl_info);
5647 if (rc)
5648 return rc;
5649
5650 sis_enable_msix(ctrl_info);
5651
5652 rc = pqi_enable_events(ctrl_info);
5653 if (rc) {
5654 dev_err(&ctrl_info->pci_dev->dev,
5655 "error enabling events\n");
5656 return rc;
5657 }
5658
5659 pqi_start_heartbeat_timer(ctrl_info);
5660
5661 ctrl_info->controller_online = true;
5662
5663 /* Register with the SCSI subsystem. */
5664 rc = pqi_register_scsi(ctrl_info);
5665 if (rc)
5666 return rc;
5667
5668 rc = pqi_get_ctrl_firmware_version(ctrl_info);
5669 if (rc) {
5670 dev_err(&ctrl_info->pci_dev->dev,
5671 "error obtaining firmware version\n");
5672 return rc;
5673 }
5674
5675 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
5676 if (rc) {
5677 dev_err(&ctrl_info->pci_dev->dev,
5678 "error updating host wellness\n");
5679 return rc;
5680 }
5681
5682 pqi_schedule_update_time_worker(ctrl_info);
5683
5684 pqi_scan_scsi_devices(ctrl_info);
5685
5686 return 0;
5687}
5688
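/*
 * Program the Completion Timeout Value field of the PCIe Device
 * Control 2 register (PCI_EXP_DEVCTL2_COMP_TIMEOUT).
 */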
5689static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev,
5690 u16 timeout)
5691{
5692 return pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
5693 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
5694}
5695
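/*
 * PCI-level setup: enable the device, pick a 64-bit DMA mask when
 * dma_addr_t is wide enough (32-bit otherwise), claim the PCI regions,
 * map the controller register block from BAR 0, raise the PCIe
 * completion timeout, and enable bus mastering.
 */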
5696static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
5697{
5698 int rc;
5699 u64 mask;
5700
5701 rc = pci_enable_device(ctrl_info->pci_dev);
5702 if (rc) {
5703 dev_err(&ctrl_info->pci_dev->dev,
5704 "failed to enable PCI device\n");
5705 return rc;
5706 }
5707
5708 if (sizeof(dma_addr_t) > 4)
5709 mask = DMA_BIT_MASK(64);
5710 else
5711 mask = DMA_BIT_MASK(32);
5712
5713 rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask);
5714 if (rc) {
5715 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
5716 goto disable_device;
5717 }
5718
5719 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
5720 if (rc) {
5721 dev_err(&ctrl_info->pci_dev->dev,
5722 "failed to obtain PCI resources\n");
5723 goto disable_device;
5724 }
5725
5726 ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
5727 ctrl_info->pci_dev, 0),
5728 sizeof(struct pqi_ctrl_registers));
5729 if (!ctrl_info->iomem_base) {
5730 dev_err(&ctrl_info->pci_dev->dev,
5731 "failed to map memory for controller registers\n");
5732 rc = -ENOMEM;
5733 goto release_regions;
5734 }
5735
5736 ctrl_info->registers = ctrl_info->iomem_base;
5737 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
5738
5739#define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6
5740
5741 /* Increase the PCIe completion timeout. */
5742 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
5743 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
5744 if (rc) {
5745 dev_err(&ctrl_info->pci_dev->dev,
5746 "failed to set PCIe completion timeout\n");
5747 goto release_regions;
5748 }
5749
5750 /* Enable bus mastering. */
5751 pci_set_master(ctrl_info->pci_dev);
5752
5753 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
5754
5755 return 0;
5756
5757release_regions:
5758 pci_release_regions(ctrl_info->pci_dev);
5759disable_device:
5760 pci_disable_device(ctrl_info->pci_dev);
5761
5762 return rc;
5763}
5764
5765static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
5766{
5767 iounmap(ctrl_info->iomem_base);
5768 pci_release_regions(ctrl_info->pci_dev);
5769 pci_disable_device(ctrl_info->pci_dev);
5770 pci_set_drvdata(ctrl_info->pci_dev, NULL);
5771}
5772
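/*
 * Allocate a pqi_ctrl_info on the requested NUMA node and initialize
 * its basic infrastructure: the scan and LUN-reset mutexes, the SCSI
 * device list and its lock, the event/rescan/update-time work items,
 * and the semaphore that limits synchronous (reserved-slot) requests.
 */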
5773static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
5774{
5775 struct pqi_ctrl_info *ctrl_info;
5776
5777 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
5778 GFP_KERNEL, numa_node);
5779 if (!ctrl_info)
5780 return NULL;
5781
5782 mutex_init(&ctrl_info->scan_mutex);
5783 mutex_init(&ctrl_info->lun_reset_mutex);
5784
5785 INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
5786 spin_lock_init(&ctrl_info->scsi_device_list_lock);
5787
5788 INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
5789 atomic_set(&ctrl_info->num_interrupts, 0);
5790
5791 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
5792 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
5793
5794 sema_init(&ctrl_info->sync_request_sem,
5795 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
5796 init_waitqueue_head(&ctrl_info->block_requests_wait);
5797
5798 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
5799 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
5800
5801 return ctrl_info;
5802}
5803
5804static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
5805{
5806 kfree(ctrl_info);
5807}
5808
5809static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
5810{
5811 pqi_free_irqs(ctrl_info);
5812 pqi_disable_msix_interrupts(ctrl_info);
5813}
5814
5815static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
5816{
5817 pqi_stop_heartbeat_timer(ctrl_info);
5818 pqi_free_interrupts(ctrl_info);
5819 if (ctrl_info->queue_memory_base)
5820 dma_free_coherent(&ctrl_info->pci_dev->dev,
5821 ctrl_info->queue_memory_length,
5822 ctrl_info->queue_memory_base,
5823 ctrl_info->queue_memory_base_dma_handle);
5824 if (ctrl_info->admin_queue_memory_base)
5825 dma_free_coherent(&ctrl_info->pci_dev->dev,
5826 ctrl_info->admin_queue_memory_length,
5827 ctrl_info->admin_queue_memory_base,
5828 ctrl_info->admin_queue_memory_base_dma_handle);
5829 pqi_free_all_io_requests(ctrl_info);
5830 if (ctrl_info->error_buffer)
5831 dma_free_coherent(&ctrl_info->pci_dev->dev,
5832 ctrl_info->error_buffer_length,
5833 ctrl_info->error_buffer,
5834 ctrl_info->error_buffer_dma_handle);
5835 if (ctrl_info->iomem_base)
5836 pqi_cleanup_pci_init(ctrl_info);
5837 pqi_free_ctrl_info(ctrl_info);
5838}
5839
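/*
 * Controller teardown: cancel the deferred rescan and update-time work,
 * remove all SCSI devices, unregister from the SCSI midlayer, drop the
 * controller back into SIS mode if it was left in PQI mode, and free
 * the remaining resources.
 */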
5840static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
5841{
5842 cancel_delayed_work_sync(&ctrl_info->rescan_work);
5843 cancel_delayed_work_sync(&ctrl_info->update_time_work);
5844 pqi_remove_all_scsi_devices(ctrl_info);
5845 pqi_unregister_scsi(ctrl_info);
5846 if (ctrl_info->pqi_mode_enabled)
5847 pqi_revert_to_sis_mode(ctrl_info);
5848 pqi_free_ctrl_resources(ctrl_info);
5849}
5850
5851static void pqi_print_ctrl_info(struct pci_dev *pdev,
5852 const struct pci_device_id *id)
5853{
5854 char *ctrl_description;
5855
5856 if (id->driver_data) {
5857 ctrl_description = (char *)id->driver_data;
5858 } else {
5859 switch (id->subvendor) {
5860 case PCI_VENDOR_ID_HP:
5861 ctrl_description = hpe_branded_controller;
5862 break;
5863 case PCI_VENDOR_ID_ADAPTEC2:
5864 default:
5865 ctrl_description = microsemi_branded_controller;
5866 break;
5867 }
5868 }
5869
5870 dev_info(&pdev->dev, "%s found\n", ctrl_description);
5871}
5872
5873static int pqi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5874{
5875 int rc;
5876 int node;
5877 struct pqi_ctrl_info *ctrl_info;
5878
5879 pqi_print_ctrl_info(pdev, id);
5880
5881 if (pqi_disable_device_id_wildcards &&
5882 id->subvendor == PCI_ANY_ID &&
5883 id->subdevice == PCI_ANY_ID) {
5884 dev_warn(&pdev->dev,
5885 "controller not probed because device ID wildcards are disabled\n");
5886 return -ENODEV;
5887 }
5888
5889 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
5890 dev_warn(&pdev->dev,
5891 "controller device ID matched using wildcards\n");
5892
5893 node = dev_to_node(&pdev->dev);
5894 if (node == NUMA_NO_NODE)
5895 set_dev_node(&pdev->dev, 0);
5896
5897 ctrl_info = pqi_alloc_ctrl_info(node);
5898 if (!ctrl_info) {
5899 dev_err(&pdev->dev,
5900 "failed to allocate controller info block\n");
5901 return -ENOMEM;
5902 }
5903
5904 ctrl_info->pci_dev = pdev;
5905
5906 rc = pqi_pci_init(ctrl_info);
5907 if (rc)
5908 goto error;
5909
5910 rc = pqi_ctrl_init(ctrl_info);
5911 if (rc)
5912 goto error;
5913
5914 return 0;
5915
5916error:
5917 pqi_remove_ctrl(ctrl_info);
5918
5919 return rc;
5920}
5921
5922static void pqi_pci_remove(struct pci_dev *pdev)
5923{
5924 struct pqi_ctrl_info *ctrl_info;
5925
5926 ctrl_info = pci_get_drvdata(pdev);
5927 if (!ctrl_info)
5928 return;
5929
5930 pqi_remove_ctrl(ctrl_info);
5931}
5932
5933static void pqi_shutdown(struct pci_dev *pdev)
5934{
5935 int rc;
5936 struct pqi_ctrl_info *ctrl_info;
5937
5938 ctrl_info = pci_get_drvdata(pdev);
5939 if (!ctrl_info)
5940 goto error;
5941
5942 /*
5943 * Write all data in the controller's battery-backed cache to
5944 * storage.
5945 */
5946 rc = pqi_flush_cache(ctrl_info);
5947 if (rc == 0)
5948 return;
5949
5950error:
5951 dev_warn(&pdev->dev,
5952 "unable to flush controller cache\n");
5953}
5954
5955/* Define the PCI IDs for the controllers that we support. */
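/*
 * The trailing PCI_ANY_ID/PCI_ANY_ID entry is a catch-all for unlisted
 * subsystem IDs; pqi_pci_probe() warns when a controller is matched
 * this way and refuses to bind at all if pqi_disable_device_id_wildcards
 * (defined elsewhere in this file) is set.
 */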
5956static const struct pci_device_id pqi_pci_id_table[] = {
5957 {
5958 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5959 PCI_VENDOR_ID_ADAPTEC2, 0x0110)
5960 },
5961 {
5962 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5963 PCI_VENDOR_ID_HP, 0x0600)
5964 },
5965 {
5966 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5967 PCI_VENDOR_ID_HP, 0x0601)
5968 },
5969 {
5970 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5971 PCI_VENDOR_ID_HP, 0x0602)
5972 },
5973 {
5974 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5975 PCI_VENDOR_ID_HP, 0x0603)
5976 },
5977 {
5978 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5979 PCI_VENDOR_ID_HP, 0x0650)
5980 },
5981 {
5982 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5983 PCI_VENDOR_ID_HP, 0x0651)
5984 },
5985 {
5986 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5987 PCI_VENDOR_ID_HP, 0x0652)
5988 },
5989 {
5990 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5991 PCI_VENDOR_ID_HP, 0x0653)
5992 },
5993 {
5994 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5995 PCI_VENDOR_ID_HP, 0x0654)
5996 },
5997 {
5998 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5999 PCI_VENDOR_ID_HP, 0x0655)
6000 },
6001 {
6002 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6003 PCI_VENDOR_ID_HP, 0x0700)
6004 },
6005 {
6006 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6007 PCI_VENDOR_ID_HP, 0x0701)
6008 },
6009 {
6010 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6011 PCI_VENDOR_ID_ADAPTEC2, 0x0800)
6012 },
6013 {
6014 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6015 PCI_VENDOR_ID_ADAPTEC2, 0x0801)
6016 },
6017 {
6018 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6019 PCI_VENDOR_ID_ADAPTEC2, 0x0802)
6020 },
6021 {
6022 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6023 PCI_VENDOR_ID_ADAPTEC2, 0x0803)
6024 },
6025 {
6026 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6027 PCI_VENDOR_ID_ADAPTEC2, 0x0804)
6028 },
6029 {
6030 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6031 PCI_VENDOR_ID_ADAPTEC2, 0x0805)
6032 },
6033 {
6034 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6035 PCI_VENDOR_ID_ADAPTEC2, 0x0900)
6036 },
6037 {
6038 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6039 PCI_VENDOR_ID_ADAPTEC2, 0x0901)
6040 },
6041 {
6042 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6043 PCI_VENDOR_ID_ADAPTEC2, 0x0902)
6044 },
6045 {
6046 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6047 PCI_VENDOR_ID_ADAPTEC2, 0x0903)
6048 },
6049 {
6050 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6051 PCI_VENDOR_ID_ADAPTEC2, 0x0904)
6052 },
6053 {
6054 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6055 PCI_VENDOR_ID_ADAPTEC2, 0x0905)
6056 },
6057 {
6058 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6059 PCI_VENDOR_ID_ADAPTEC2, 0x0906)
6060 },
6061 {
6062 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6063 PCI_VENDOR_ID_HP, 0x1001)
6064 },
6065 {
6066 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6067 PCI_VENDOR_ID_HP, 0x1100)
6068 },
6069 {
6070 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6071 PCI_VENDOR_ID_HP, 0x1101)
6072 },
6073 {
6074 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6075 PCI_VENDOR_ID_HP, 0x1102)
6076 },
6077 {
6078 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6079 PCI_VENDOR_ID_HP, 0x1150)
6080 },
6081 {
6082 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6083 PCI_ANY_ID, PCI_ANY_ID)
6084 },
6085 { 0 }
6086};
6087
6088MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
6089
6090static struct pci_driver pqi_pci_driver = {
6091 .name = DRIVER_NAME_SHORT,
6092 .id_table = pqi_pci_id_table,
6093 .probe = pqi_pci_probe,
6094 .remove = pqi_pci_remove,
6095 .shutdown = pqi_shutdown,
6096};
6097
6098static int __init pqi_init(void)
6099{
6100 int rc;
6101
6102 pr_info(DRIVER_NAME "\n");
6103
6104 pqi_sas_transport_template =
6105 sas_attach_transport(&pqi_sas_transport_functions);
6106 if (!pqi_sas_transport_template)
6107 return -ENODEV;
6108
6109 rc = pci_register_driver(&pqi_pci_driver);
6110 if (rc)
6111 sas_release_transport(pqi_sas_transport_template);
6112
6113 return rc;
6114}
6115
6116static void __exit pqi_cleanup(void)
6117{
6118 pci_unregister_driver(&pqi_pci_driver);
6119 sas_release_transport(pqi_sas_transport_template);
6120}
6121
6122module_init(pqi_init);
6123module_exit(pqi_cleanup);
6124
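/*
 * verify_structures() is never called; it exists only so that the
 * BUILD_BUG_ON() assertions below are evaluated at compile time,
 * pinning the offsets and sizes of the SIS/PQI register blocks and the
 * IU/BMIC structures to the layout the firmware expects.
 */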
6125static void __attribute__((unused)) verify_structures(void)
6126{
6127 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6128 sis_host_to_ctrl_doorbell) != 0x20);
6129 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6130 sis_interrupt_mask) != 0x34);
6131 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6132 sis_ctrl_to_host_doorbell) != 0x9c);
6133 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6134 sis_ctrl_to_host_doorbell_clear) != 0xa0);
6135 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6136 sis_driver_scratch) != 0xb0);
6137 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6138 sis_firmware_status) != 0xbc);
6139 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6140 sis_mailbox) != 0x1000);
6141 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6142 pqi_registers) != 0x4000);
6143
6144 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
6145 iu_type) != 0x0);
6146 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
6147 iu_length) != 0x2);
6148 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
6149 response_queue_id) != 0x4);
6150 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
6151 work_area) != 0x6);
6152 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
6153
6154 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6155 status) != 0x0);
6156 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6157 service_response) != 0x1);
6158 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6159 data_present) != 0x2);
6160 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6161 reserved) != 0x3);
6162 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6163 residual_count) != 0x4);
6164 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6165 data_length) != 0x8);
6166 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6167 reserved1) != 0xa);
6168 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6169 data) != 0xc);
6170 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
6171
6172 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6173 data_in_result) != 0x0);
6174 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6175 data_out_result) != 0x1);
6176 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6177 reserved) != 0x2);
6178 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6179 status) != 0x5);
6180 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6181 status_qualifier) != 0x6);
6182 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6183 sense_data_length) != 0x8);
6184 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6185 response_data_length) != 0xa);
6186 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6187 data_in_transferred) != 0xc);
6188 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6189 data_out_transferred) != 0x10);
6190 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6191 data) != 0x14);
6192 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
6193
6194 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6195 signature) != 0x0);
6196 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6197 function_and_status_code) != 0x8);
6198 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6199 max_admin_iq_elements) != 0x10);
6200 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6201 max_admin_oq_elements) != 0x11);
6202 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6203 admin_iq_element_length) != 0x12);
6204 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6205 admin_oq_element_length) != 0x13);
6206 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6207 max_reset_timeout) != 0x14);
6208 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6209 legacy_intx_status) != 0x18);
6210 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6211 legacy_intx_mask_set) != 0x1c);
6212 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6213 legacy_intx_mask_clear) != 0x20);
6214 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6215 device_status) != 0x40);
6216 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6217 admin_iq_pi_offset) != 0x48);
6218 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6219 admin_oq_ci_offset) != 0x50);
6220 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6221 admin_iq_element_array_addr) != 0x58);
6222 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6223 admin_oq_element_array_addr) != 0x60);
6224 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6225 admin_iq_ci_addr) != 0x68);
6226 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6227 admin_oq_pi_addr) != 0x70);
6228 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6229 admin_iq_num_elements) != 0x78);
6230 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6231 admin_oq_num_elements) != 0x79);
6232 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6233 admin_queue_int_msg_num) != 0x7a);
6234 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6235 device_error) != 0x80);
6236 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6237 error_details) != 0x88);
6238 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6239 device_reset) != 0x90);
6240 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6241 power_action) != 0x94);
6242 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
6243
6244 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6245 header.iu_type) != 0);
6246 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6247 header.iu_length) != 2);
6248 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6249 header.work_area) != 6);
6250 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6251 request_id) != 8);
6252 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6253 function_code) != 10);
6254 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6255 data.report_device_capability.buffer_length) != 44);
6256 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6257 data.report_device_capability.sg_descriptor) != 48);
6258 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6259 data.create_operational_iq.queue_id) != 12);
6260 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6261 data.create_operational_iq.element_array_addr) != 16);
6262 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6263 data.create_operational_iq.ci_addr) != 24);
6264 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6265 data.create_operational_iq.num_elements) != 32);
6266 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6267 data.create_operational_iq.element_length) != 34);
6268 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6269 data.create_operational_iq.queue_protocol) != 36);
6270 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6271 data.create_operational_oq.queue_id) != 12);
6272 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6273 data.create_operational_oq.element_array_addr) != 16);
6274 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6275 data.create_operational_oq.pi_addr) != 24);
6276 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6277 data.create_operational_oq.num_elements) != 32);
6278 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6279 data.create_operational_oq.element_length) != 34);
6280 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6281 data.create_operational_oq.queue_protocol) != 36);
6282 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6283 data.create_operational_oq.int_msg_num) != 40);
6284 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6285 data.create_operational_oq.coalescing_count) != 42);
6286 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6287 data.create_operational_oq.min_coalescing_time) != 44);
6288 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6289 data.create_operational_oq.max_coalescing_time) != 48);
6290 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6291 data.delete_operational_queue.queue_id) != 12);
6292 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
6293 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6294 data.create_operational_iq) != 64 - 11);
6295 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6296 data.create_operational_oq) != 64 - 11);
6297 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6298 data.delete_operational_queue) != 64 - 11);
6299
6300 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6301 header.iu_type) != 0);
6302 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6303 header.iu_length) != 2);
6304 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6305 header.work_area) != 6);
6306 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6307 request_id) != 8);
6308 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6309 function_code) != 10);
6310 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6311 status) != 11);
6312 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6313 data.create_operational_iq.status_descriptor) != 12);
6314 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6315 data.create_operational_iq.iq_pi_offset) != 16);
6316 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6317 data.create_operational_oq.status_descriptor) != 12);
6318 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6319 data.create_operational_oq.oq_ci_offset) != 16);
6320 BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
6321
6322 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6323 header.iu_type) != 0);
6324 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6325 header.iu_length) != 2);
6326 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6327 header.response_queue_id) != 4);
6328 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6329 header.work_area) != 6);
6330 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6331 request_id) != 8);
6332 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6333 nexus_id) != 10);
6334 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6335 buffer_length) != 12);
6336 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6337 lun_number) != 16);
6338 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6339 protocol_specific) != 24);
6340 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6341 error_index) != 27);
6342 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6343 cdb) != 32);
6344 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6345 sg_descriptors) != 64);
6346 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
6347 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
6348
6349 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6350 header.iu_type) != 0);
6351 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6352 header.iu_length) != 2);
6353 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6354 header.response_queue_id) != 4);
6355 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6356 header.work_area) != 6);
6357 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6358 request_id) != 8);
6359 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6360 nexus_id) != 12);
6361 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6362 buffer_length) != 16);
6363 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6364 data_encryption_key_index) != 22);
6365 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6366 encrypt_tweak_lower) != 24);
6367 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6368 encrypt_tweak_upper) != 28);
6369 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6370 cdb) != 32);
6371 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6372 error_index) != 48);
6373 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6374 num_sg_descriptors) != 50);
6375 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6376 cdb_length) != 51);
6377 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6378 lun_number) != 52);
6379 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6380 sg_descriptors) != 64);
6381 BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
6382 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
6383
6384 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6385 header.iu_type) != 0);
6386 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6387 header.iu_length) != 2);
6388 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6389 request_id) != 8);
6390 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6391 error_index) != 10);
6392
6393 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6394 header.iu_type) != 0);
6395 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6396 header.iu_length) != 2);
6397 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6398 header.response_queue_id) != 4);
6399 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6400 request_id) != 8);
6401 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6402 data.report_event_configuration.buffer_length) != 12);
6403 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6404 data.report_event_configuration.sg_descriptors) != 16);
6405 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6406 data.set_event_configuration.global_event_oq_id) != 10);
6407 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6408 data.set_event_configuration.buffer_length) != 12);
6409 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6410 data.set_event_configuration.sg_descriptors) != 16);
6411
6412 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
6413 max_inbound_iu_length) != 6);
6414 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
6415 max_outbound_iu_length) != 14);
6416 BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
6417
6418 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6419 data_length) != 0);
6420 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6421 iq_arbitration_priority_support_bitmask) != 8);
6422 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6423 maximum_aw_a) != 9);
6424 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6425 maximum_aw_b) != 10);
6426 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6427 maximum_aw_c) != 11);
6428 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6429 max_inbound_queues) != 16);
6430 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6431 max_elements_per_iq) != 18);
6432 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6433 max_iq_element_length) != 24);
6434 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6435 min_iq_element_length) != 26);
6436 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6437 max_outbound_queues) != 30);
6438 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6439 max_elements_per_oq) != 32);
6440 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6441 intr_coalescing_time_granularity) != 34);
6442 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6443 max_oq_element_length) != 36);
6444 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6445 min_oq_element_length) != 38);
6446 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6447 iu_layer_descriptors) != 64);
6448 BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
6449
6450 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
6451 event_type) != 0);
6452 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
6453 oq_id) != 2);
6454 BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);
6455
6456 BUILD_BUG_ON(offsetof(struct pqi_event_config,
6457 num_event_descriptors) != 2);
6458 BUILD_BUG_ON(offsetof(struct pqi_event_config,
6459 descriptors) != 4);
6460
6461 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6462 header.iu_type) != 0);
6463 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6464 header.iu_length) != 2);
6465 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6466 event_type) != 8);
6467 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6468 event_id) != 10);
6469 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6470 additional_event_id) != 12);
6471 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6472 data) != 16);
6473 BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
6474
6475 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6476 header.iu_type) != 0);
6477 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6478 header.iu_length) != 2);
6479 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6480 event_type) != 8);
6481 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6482 event_id) != 10);
6483 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6484 additional_event_id) != 12);
6485 BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
6486
6487 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6488 header.iu_type) != 0);
6489 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6490 header.iu_length) != 2);
6491 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6492 request_id) != 8);
6493 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6494 nexus_id) != 10);
6495 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6496 lun_number) != 16);
6497 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6498 protocol_specific) != 24);
6499 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6500 outbound_queue_id_to_manage) != 26);
6501 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6502 request_id_to_manage) != 28);
6503 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6504 task_management_function) != 30);
6505 BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);
6506
6507 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6508 header.iu_type) != 0);
6509 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6510 header.iu_length) != 2);
6511 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6512 request_id) != 8);
6513 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6514 nexus_id) != 10);
6515 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6516 additional_response_info) != 12);
6517 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6518 response_code) != 15);
6519 BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
6520
6521 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6522 configured_logical_drive_count) != 0);
6523 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6524 configuration_signature) != 1);
6525 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6526 firmware_version) != 5);
6527 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6528 extended_logical_unit_count) != 154);
6529 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6530 firmware_build_number) != 190);
6531 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6532 controller_mode) != 292);
6533
6534 BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
6535 BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
6536 BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
6537 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6538 BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
6539 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6540 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
6541 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
6542 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6543 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
6544 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
6545 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6546
6547 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
6548}