/*
 *    driver for Microsemi PQI-based storage controllers
 *    Copyright (c) 2016-2017 Microsemi Corporation
 *    Copyright (c) 2016 PMC-Sierra, Inc.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <asm/unaligned.h>
#include "smartpqi.h"
#include "smartpqi_sis.h"

#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif

#define DRIVER_VERSION		"1.1.4-130"
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		1
#define DRIVER_RELEASE		4
#define DRIVER_REVISION		130

#define DRIVER_NAME		"Microsemi PQI Driver (v" \
				DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT	"smartpqi"

#define PQI_EXTRA_SGL_MEMORY	(12 * sizeof(struct pqi_sg_descriptor))

MODULE_AUTHOR("Microsemi");
MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
	DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");

static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, unsigned long timeout_secs);

/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);

enum pqi_lockup_action {
	NONE,
	REBOOT,
	PANIC
};

static enum pqi_lockup_action pqi_lockup_action = NONE;

static struct {
	enum pqi_lockup_action	action;
	char			*name;
} pqi_lockup_actions[] = {
	{
		.action = NONE,
		.name = "none",
	},
	{
		.action = REBOOT,
		.name = "reboot",
	},
	{
		.action = PANIC,
		.name = "panic",
	},
};

static unsigned int pqi_supported_event_types[] = {
	PQI_EVENT_TYPE_HOTPLUG,
	PQI_EVENT_TYPE_HARDWARE,
	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
	PQI_EVENT_TYPE_LOGICAL_DEVICE,
	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
};

static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
	pqi_disable_device_id_wildcards, int, 0644);
MODULE_PARM_DESC(disable_device_id_wildcards,
	"Disable device ID wildcards.");

static int pqi_disable_heartbeat;
module_param_named(disable_heartbeat,
	pqi_disable_heartbeat, int, 0644);
MODULE_PARM_DESC(disable_heartbeat,
	"Disable heartbeat.");

static int pqi_disable_ctrl_shutdown;
module_param_named(disable_ctrl_shutdown,
	pqi_disable_ctrl_shutdown, int, 0644);
MODULE_PARM_DESC(disable_ctrl_shutdown,
	"Disable controller shutdown when controller locked up.");

static char *pqi_lockup_action_param;
module_param_named(lockup_action,
	pqi_lockup_action_param, charp, 0644);
MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
	"\t\tSupported: none, reboot, panic\n"
	"\t\tDefault: none");

static char *raid_levels[] = {
	"RAID-0",
	"RAID-4",
	"RAID-1(1+0)",
	"RAID-5",
	"RAID-5+1",
	"RAID-ADG",
	"RAID-1(ADM)",
};

static char *pqi_raid_level_to_string(u8 raid_level)
{
	if (raid_level < ARRAY_SIZE(raid_levels))
		return raid_levels[raid_level];

	return "RAID UNKNOWN";
}

#define SA_RAID_0		0
#define SA_RAID_4		1
#define SA_RAID_1		2	/* also used for RAID 10 */
#define SA_RAID_5		3	/* also used for RAID 50 */
#define SA_RAID_51		4
#define SA_RAID_6		5	/* also used for RAID 60 */
#define SA_RAID_ADM		6	/* also used for RAID 1+0 ADM */
#define SA_RAID_MAX		SA_RAID_ADM
#define SA_RAID_UNKNOWN		0xff

static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
	pqi_prep_for_scsi_done(scmd);
	scmd->scsi_done(scmd);
}

static inline void pqi_disable_write_same(struct scsi_device *sdev)
{
	sdev->no_write_same = 1;
}

static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}

static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
{
	void *hostdata = shost_priv(shost);

	return *((struct pqi_ctrl_info **)hostdata);
}

static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device;
}

static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
{
	return scsi3addr[2] != 0;
}

static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	return !ctrl_info->controller_online;
}

static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->controller_online)
		if (!sis_is_firmware_running(ctrl_info))
			pqi_take_ctrl_offline(ctrl_info);
}

static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}

static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
	struct pqi_ctrl_info *ctrl_info)
{
	return sis_read_driver_scratch(ctrl_info);
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_mode mode)
{
	sis_write_driver_scratch(ctrl_info, mode);
}

static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = true;
	scsi_block_requests(ctrl_info->scsi_host);
}

static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = false;
	wake_up_all(&ctrl_info->block_requests_wait);
	pqi_retry_raid_bypass_requests(ctrl_info);
	scsi_unblock_requests(ctrl_info->scsi_host);
}

static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->block_requests;
}

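/*
 * Wait for controller request blocking to clear, then return how much of
 * the caller's timeout (in milliseconds) remains; NO_TIMEOUT waits forever.
 */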
static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
	unsigned long timeout_msecs)
{
	unsigned long remaining_msecs;

	if (!pqi_ctrl_blocked(ctrl_info))
		return timeout_msecs;

	atomic_inc(&ctrl_info->num_blocked_threads);

	if (timeout_msecs == NO_TIMEOUT) {
		wait_event(ctrl_info->block_requests_wait,
			!pqi_ctrl_blocked(ctrl_info));
		remaining_msecs = timeout_msecs;
	} else {
		unsigned long remaining_jiffies;

		remaining_jiffies =
			wait_event_timeout(ctrl_info->block_requests_wait,
				!pqi_ctrl_blocked(ctrl_info),
				msecs_to_jiffies(timeout_msecs));
		remaining_msecs = jiffies_to_msecs(remaining_jiffies);
	}

	atomic_dec(&ctrl_info->num_blocked_threads);

	return remaining_msecs;
}

static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
{
	atomic_inc(&ctrl_info->num_busy_threads);
}

static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
{
	atomic_dec(&ctrl_info->num_busy_threads);
}

static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
	while (atomic_read(&ctrl_info->num_busy_threads) >
		atomic_read(&ctrl_info->num_blocked_threads))
		usleep_range(1000, 2000);
}

static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
{
	return device->device_offline;
}

static inline void pqi_device_reset_start(struct pqi_scsi_dev *device)
{
	device->in_reset = true;
}

static inline void pqi_device_reset_done(struct pqi_scsi_dev *device)
{
	device->in_reset = false;
}

static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
{
	return device->in_reset;
}

static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
{
	device->in_remove = true;
}

static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	return device->in_remove && !ctrl_info->in_shutdown;
}

static inline void pqi_schedule_rescan_worker_with_delay(
	struct pqi_ctrl_info *ctrl_info, unsigned long delay)
{
	if (pqi_ctrl_offline(ctrl_info))
		return;

	schedule_delayed_work(&ctrl_info->rescan_work, delay);
}

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
}

#define PQI_RESCAN_WORK_DELAY	(10 * HZ)

static inline void pqi_schedule_rescan_worker_delayed(
	struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
}

static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->rescan_work);
}

static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->heartbeat_counter)
		return 0;

	return readl(ctrl_info->heartbeat_counter);
}

static int pqi_map_single(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
	size_t buffer_length, enum dma_data_direction data_direction)
{
	dma_addr_t bus_address;

	if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
		return 0;

	bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
		data_direction);
	if (dma_mapping_error(&pci_dev->dev, bus_address))
		return -ENOMEM;

	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
	put_unaligned_le32(buffer_length, &sg_descriptor->length);
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return 0;
}

static void pqi_pci_unmap(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *descriptors, int num_descriptors,
	enum dma_data_direction data_direction)
{
	int i;

	if (data_direction == DMA_NONE)
		return;

	for (i = 0; i < num_descriptors; i++)
		dma_unmap_single(&pci_dev->dev,
			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
			get_unaligned_le32(&descriptors[i].length),
			data_direction);
}

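/*
 * Build the CISS/BMIC pass-through CDB for the given command, set the data
 * direction, and DMA-map the caller's buffer into the first SG descriptor.
 */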
static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length,
	u16 vpd_page, enum dma_data_direction *dir)
{
	u8 *cdb;
	size_t cdb_length = buffer_length;

	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le16(offsetof(struct pqi_raid_path_request,
		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le32(buffer_length, &request->buffer_length);
	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	cdb = request->cdb;

	switch (cmd) {
	case INQUIRY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = INQUIRY;
		if (vpd_page & VPD_PAGE) {
			cdb[1] = 0x1;
			cdb[2] = (u8)vpd_page;
		}
		cdb[4] = (u8)cdb_length;
		break;
	case CISS_REPORT_LOG:
	case CISS_REPORT_PHYS:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = cmd;
		if (cmd == CISS_REPORT_PHYS)
			cdb[1] = CISS_REPORT_PHYS_EXTENDED;
		else
			cdb[1] = CISS_REPORT_LOG_EXTENDED;
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case CISS_GET_RAID_MAP:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = CISS_READ;
		cdb[1] = CISS_GET_RAID_MAP;
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case SA_FLUSH_CACHE:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = BMIC_FLUSH_CACHE;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SENSE_DIAG_OPTIONS:
		cdb_length = 0;
		/* fall through */
	case BMIC_IDENTIFY_CONTROLLER:
	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = BMIC_READ;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SET_DIAG_OPTIONS:
		cdb_length = 0;
		/* fall through */
	case BMIC_WRITE_HOST_WELLNESS:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n",
			cmd);
		break;
	}

	switch (request->data_direction) {
	case SOP_READ_FLAG:
		*dir = DMA_FROM_DEVICE;
		break;
	case SOP_WRITE_FLAG:
		*dir = DMA_TO_DEVICE;
		break;
	case SOP_NO_DIRECTION_FLAG:
		*dir = DMA_NONE;
		break;
	default:
		*dir = DMA_BIDIRECTIONAL;
		break;
	}

	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
		buffer, buffer_length, *dir);
}

static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
{
	io_request->scmd = NULL;
	io_request->status = 0;
	io_request->error_info = NULL;
	io_request->raid_bypass = false;
}

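/*
 * Claim a free slot from the I/O request pool using only the per-request
 * refcount; the next-slot hint is updated without locking (benignly racy).
 */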
static struct pqi_io_request *pqi_alloc_io_request(
	struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_io_request *io_request;
	u16 i = ctrl_info->next_io_request_slot;	/* benignly racy */

	while (1) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_inc_return(&io_request->refcount) == 1)
			break;
		atomic_dec(&io_request->refcount);
		i = (i + 1) % ctrl_info->max_io_slots;
	}

	/* benignly racy */
	ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;

	pqi_reinit_io_request(io_request);

	return io_request;
}

static void pqi_free_io_request(struct pqi_io_request *io_request)
{
	atomic_dec(&io_request->refcount);
}

static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
	struct pqi_raid_error_info *error_info,
	unsigned long timeout_msecs)
{
	int rc;
	enum dma_data_direction dir;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		cmd, scsi3addr, buffer,
		buffer_length, vpd_page, &dir);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, error_info, timeout_msecs);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
	return rc;
}

/* Helper functions for pqi_send_scsi_raid_request */

static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, NULL, NO_TIMEOUT);
}

static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, error_info, NO_TIMEOUT);
}

static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
	struct bmic_identify_controller *buffer)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
		buffer, sizeof(*buffer));
}

static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
		buffer, buffer_length, vpd_page, NULL, NO_TIMEOUT);
}

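/* Check the device's supported-pages VPD list for the given VPD page. */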
static bool pqi_vpd_page_supported(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u16 vpd_page)
{
	int rc;
	int i;
	int pages;
	unsigned char *buf, bufsize;

	buf = kzalloc(256, GFP_KERNEL);
	if (!buf)
		return false;

	/* Get the size of the page list first */
	rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
		VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES,
		buf, SCSI_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_unsupported;

	pages = buf[3];
	if ((pages + SCSI_VPD_HEADER_SZ) <= 255)
		bufsize = pages + SCSI_VPD_HEADER_SZ;
	else
		bufsize = 255;

	/* Get the whole VPD page list */
	rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
		VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES,
		buf, bufsize);
	if (rc != 0)
		goto exit_unsupported;

	pages = buf[3];
	for (i = 1; i <= pages; i++)
		if (buf[3 + i] == vpd_page)
			goto exit_supported;

exit_unsupported:
	kfree(buf);
	return false;

exit_supported:
	kfree(buf);
	return true;
}

static int pqi_get_device_id(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u8 *device_id, int buflen)
{
	int rc;
	unsigned char *buf;

	if (!pqi_vpd_page_supported(ctrl_info, scsi3addr, SCSI_VPD_DEVICE_ID))
		return 1; /* function not supported */

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
		VPD_PAGE | SCSI_VPD_DEVICE_ID,
		buf, 64);
	if (rc == 0) {
		if (buflen > 16)
			buflen = 16;
		memcpy(device_id, &buf[SCSI_VPD_DEVICE_ID_IDX], buflen);
	}

	kfree(buf);

	return rc;
}

static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *buffer,
	size_t buffer_length)
{
	int rc;
	enum dma_data_direction dir;
	u16 bmic_device_index;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &dir);
	if (rc)
		return rc;

	bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
	request.cdb[2] = (u8)bmic_device_index;
	request.cdb[9] = (u8)(bmic_device_index >> 8);

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
	return rc;
}

static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
	enum bmic_flush_cache_shutdown_event shutdown_event)
{
	int rc;
	struct bmic_flush_cache *flush_cache;

	/*
	 * Don't bother trying to flush the cache if the controller is
	 * locked up.
	 */
	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;

	flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
	if (!flush_cache)
		return -ENOMEM;

	flush_cache->shutdown_event = shutdown_event;

	rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
		sizeof(*flush_cache));

	kfree(flush_cache);

	return rc;
}

#define PQI_FETCH_PTRAID_DATA		(1UL << 31)

static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_diag_options *diag;

	diag = kzalloc(sizeof(*diag), GFP_KERNEL);
	if (!diag)
		return -ENOMEM;

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
		diag, sizeof(*diag));
	if (rc)
		goto out;

	diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS,
		diag, sizeof(*diag));
out:
	kfree(diag);

	return rc;
}

static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
	void *buffer, size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
		buffer, buffer_length);
}

#pragma pack(1)

struct bmic_host_wellness_driver_version {
	u8	start_tag[4];
	u8	driver_version_tag[2];
	__le16	driver_version_length;
	char	driver_version[32];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

static int pqi_write_driver_version_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_driver_version *buffer;
	size_t buffer_length;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->driver_version_tag[0] = 'D';
	buffer->driver_version_tag[1] = 'V';
	put_unaligned_le16(sizeof(buffer->driver_version),
		&buffer->driver_version_length);
	strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
		sizeof(buffer->driver_version) - 1);
	buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#pragma pack(1)

struct bmic_host_wellness_time {
	u8	start_tag[4];
	u8	time_tag[2];
	__le16	time_length;
	u8	time[8];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

static int pqi_write_current_time_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_time *buffer;
	size_t buffer_length;
	time64_t local_time;
	unsigned int year;
	struct tm tm;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->time_tag[0] = 'T';
	buffer->time_tag[1] = 'D';
	put_unaligned_le16(sizeof(buffer->time),
		&buffer->time_length);

	local_time = ktime_get_real_seconds();
	time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
	year = tm.tm_year + 1900;

	buffer->time[0] = bin2bcd(tm.tm_hour);
	buffer->time[1] = bin2bcd(tm.tm_min);
	buffer->time[2] = bin2bcd(tm.tm_sec);
	buffer->time[3] = 0;
	buffer->time[4] = bin2bcd(tm.tm_mon + 1);
	buffer->time[5] = bin2bcd(tm.tm_mday);
	buffer->time[6] = bin2bcd(year / 100);
	buffer->time[7] = bin2bcd(year % 100);

	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * HZ)

static void pqi_update_time_worker(struct work_struct *work)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		update_time_work);

	if (pqi_ctrl_offline(ctrl_info))
		return;

	rc = pqi_write_current_time_to_host_wellness(ctrl_info);
	if (rc)
		dev_warn(&ctrl_info->pci_dev->dev,
			"error updating time on controller\n");

	schedule_delayed_work(&ctrl_info->update_time_work,
		PQI_UPDATE_TIME_WORK_INTERVAL);
}

static inline void pqi_schedule_update_time_worker(
	struct pqi_ctrl_info *ctrl_info)
{
	schedule_delayed_work(&ctrl_info->update_time_work, 0);
}

static inline void pqi_cancel_update_time_worker(
	struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->update_time_work);
}

static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void *buffer, size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer,
		buffer_length);
}

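/*
 * Issue CISS_REPORT_PHYS/LOG twice: once to learn the LUN list length and
 * again for the full list, retrying if the list grew between the two calls.
 */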
static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void **buffer)
{
	int rc;
	size_t lun_list_length;
	size_t lun_data_length;
	size_t new_lun_list_length;
	void *lun_data = NULL;
	struct report_lun_header *report_lun_header;

	report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
	if (!report_lun_header) {
		rc = -ENOMEM;
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
		sizeof(*report_lun_header));
	if (rc)
		goto out;

	lun_list_length = get_unaligned_be32(&report_lun_header->list_length);

again:
	lun_data_length = sizeof(struct report_lun_header) + lun_list_length;

	lun_data = kmalloc(lun_data_length, GFP_KERNEL);
	if (!lun_data) {
		rc = -ENOMEM;
		goto out;
	}

	if (lun_list_length == 0) {
		memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
	if (rc)
		goto out;

	new_lun_list_length = get_unaligned_be32(
		&((struct report_lun_header *)lun_data)->list_length);

	if (new_lun_list_length > lun_list_length) {
		lun_list_length = new_lun_list_length;
		kfree(lun_data);
		goto again;
	}

out:
	kfree(report_lun_header);

	if (rc) {
		kfree(lun_data);
		lun_data = NULL;
	}

	*buffer = lun_data;

	return rc;
}

static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
	void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
		buffer);
}

static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
	void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
}

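/*
 * Fetch the physical and logical LUN lists and append an empty entry for
 * the controller itself to the end of the logical list.
 */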
static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
	struct report_phys_lun_extended **physdev_list,
	struct report_log_lun_extended **logdev_list)
{
	int rc;
	size_t logdev_list_length;
	size_t logdev_data_length;
	struct report_log_lun_extended *internal_logdev_list;
	struct report_log_lun_extended *logdev_data;
	struct report_lun_header report_lun_header;

	rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report physical LUNs failed\n");

	rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report logical LUNs failed\n");

	/*
	 * Tack the controller itself onto the end of the logical device list.
	 */

	logdev_data = *logdev_list;

	if (logdev_data) {
		logdev_list_length =
			get_unaligned_be32(&logdev_data->header.list_length);
	} else {
		memset(&report_lun_header, 0, sizeof(report_lun_header));
		logdev_data =
			(struct report_log_lun_extended *)&report_lun_header;
		logdev_list_length = 0;
	}

	logdev_data_length = sizeof(struct report_lun_header) +
		logdev_list_length;

	internal_logdev_list = kmalloc(logdev_data_length +
		sizeof(struct report_log_lun_extended), GFP_KERNEL);
	if (!internal_logdev_list) {
		kfree(*logdev_list);
		*logdev_list = NULL;
		return -ENOMEM;
	}

	memcpy(internal_logdev_list, logdev_data, logdev_data_length);
	memset((u8 *)internal_logdev_list + logdev_data_length, 0,
		sizeof(struct report_log_lun_extended_entry));
	put_unaligned_be32(logdev_list_length +
		sizeof(struct report_log_lun_extended_entry),
		&internal_logdev_list->header.list_length);

	kfree(*logdev_list);
	*logdev_list = internal_logdev_list;

	return 0;
}

static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

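/*
 * Derive bus/target/lun from the LUN ID for the controller and for logical
 * volumes; physical device assignment is deferred to the SAS transport.
 */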
static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
{
	u8 *scsi3addr;
	u32 lunid;
	int bus;
	int target;
	int lun;

	scsi3addr = device->scsi3addr;
	lunid = get_unaligned_le32(scsi3addr);

	if (pqi_is_hba_lunid(scsi3addr)) {
		/* The specified device is the controller. */
		pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
		device->target_lun_valid = true;
		return;
	}

	if (pqi_is_logical_device(device)) {
		if (device->is_external_raid_device) {
			bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
			target = (lunid >> 16) & 0x3fff;
			lun = lunid & 0xff;
		} else {
			bus = PQI_RAID_VOLUME_BUS;
			target = 0;
			lun = lunid & 0x3fff;
		}
		pqi_set_bus_target_lun(device, bus, target, lun);
		device->target_lun_valid = true;
		return;
	}

	/*
	 * Defer target and LUN assignment for non-controller physical devices
	 * because the SAS transport layer will make these assignments later.
	 */
	pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
}

static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 raid_level;
	u8 *buffer;

	raid_level = SA_RAID_UNKNOWN;

	buffer = kmalloc(64, GFP_KERNEL);
	if (buffer) {
		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
			VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
		if (rc == 0) {
			raid_level = buffer[8];
			if (raid_level > SA_RAID_MAX)
				raid_level = SA_RAID_UNKNOWN;
		}
		kfree(buffer);
	}

	device->raid_level = raid_level;
}

static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct raid_map *raid_map)
{
	char *err_msg;
	u32 raid_map_size;
	u32 r5or6_blocks_per_row;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size < offsetof(struct raid_map, disk_data)) {
		err_msg = "RAID map too small";
		goto bad_raid_map;
	}

	if (device->raid_level == SA_RAID_1) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
			err_msg = "invalid RAID-1 map";
			goto bad_raid_map;
		}
	} else if (device->raid_level == SA_RAID_ADM) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
			err_msg = "invalid RAID-1(ADM) map";
			goto bad_raid_map;
		}
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) &&
		get_unaligned_le16(&raid_map->layout_map_count) > 1) {
		/* RAID 50/60 */
		r5or6_blocks_per_row =
			get_unaligned_le16(&raid_map->strip_size) *
			get_unaligned_le16(&raid_map->data_disks_per_row);
		if (r5or6_blocks_per_row == 0) {
			err_msg = "invalid RAID-5 or RAID-6 map";
			goto bad_raid_map;
		}
	}

	return 0;

bad_raid_map:
	dev_warn(&ctrl_info->pci_dev->dev,
		"logical device %08x%08x %s\n",
		*((u32 *)&device->scsi3addr),
		*((u32 *)&device->scsi3addr[4]), err_msg);

	return -EINVAL;
}

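/*
 * Read the RAID map for a volume; if the firmware reports a structure
 * larger than the default buffer, reallocate and fetch it again.
 */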
static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u32 raid_map_size;
	struct raid_map *raid_map;

	raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
	if (!raid_map)
		return -ENOMEM;

	rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
		device->scsi3addr, raid_map, sizeof(*raid_map),
		0, NULL, NO_TIMEOUT);

	if (rc)
		goto error;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size > sizeof(*raid_map)) {

		kfree(raid_map);

		raid_map = kmalloc(raid_map_size, GFP_KERNEL);
		if (!raid_map)
			return -ENOMEM;

		rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
			device->scsi3addr, raid_map, raid_map_size,
			0, NULL, NO_TIMEOUT);
		if (rc)
			goto error;

		if (get_unaligned_le32(&raid_map->structure_size)
			!= raid_map_size) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"Requested %u bytes, received %u bytes\n",
				raid_map_size,
				get_unaligned_le32(&raid_map->structure_size));
			rc = -EINVAL;
			goto error;
		}
	}

	rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
	if (rc)
		goto error;

	device->raid_map = raid_map;

	return 0;

error:
	kfree(raid_map);

	return rc;
}

static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;
	u8 bypass_status;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
	if (rc)
		goto out;

#define RAID_BYPASS_STATUS	4
#define RAID_BYPASS_CONFIGURED	0x1
#define RAID_BYPASS_ENABLED	0x2

	bypass_status = buffer[RAID_BYPASS_STATUS];
	device->raid_bypass_configured =
		(bypass_status & RAID_BYPASS_CONFIGURED) != 0;
	if (device->raid_bypass_configured &&
		(bypass_status & RAID_BYPASS_ENABLED) &&
		pqi_get_raid_map(ctrl_info, device) == 0)
		device->raid_bypass_enabled = true;

out:
	kfree(buffer);
}

/*
 * Use vendor-specific VPD to determine online/offline status of a volume.
 */

static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	size_t page_length;
	u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
	bool volume_offline = true;
	u32 volume_flags;
	struct ciss_vpd_logical_volume_status *vpd;

	vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
	if (!vpd)
		goto no_buffer;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
	if (rc)
		goto out;

	page_length = offsetof(struct ciss_vpd_logical_volume_status,
		volume_status) + vpd->page_length;
	if (page_length < sizeof(*vpd))
		goto out;

	volume_status = vpd->volume_status;
	volume_flags = get_unaligned_be32(&vpd->flags);
	volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;

out:
	kfree(vpd);
no_buffer:
	device->volume_status = volume_status;
	device->volume_offline = volume_offline;
}

#define PQI_INQUIRY_PAGE0_RETRIES	3

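/*
 * Identify a device with a standard INQUIRY, retrying a few times for
 * physical devices when the firmware aborts the command.
 */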
static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;
	unsigned int retries;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Send an inquiry to the device to see what it is. */
	for (retries = 0;;) {
		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0,
			buffer, 64);
		if (rc == 0)
			break;
		if (pqi_is_logical_device(device) ||
			rc != PQI_CMD_STATUS_ABORTED ||
			++retries > PQI_INQUIRY_PAGE0_RETRIES)
			goto out;
	}

	scsi_sanitize_inquiry_string(&buffer[8], 8);
	scsi_sanitize_inquiry_string(&buffer[16], 16);

	device->devtype = buffer[0] & 0x1f;
	memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
	memcpy(device->model, &buffer[16], sizeof(device->model));

	if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
		if (device->is_external_raid_device) {
			device->raid_level = SA_RAID_UNKNOWN;
			device->volume_status = CISS_LV_OK;
			device->volume_offline = false;
		} else {
			pqi_get_raid_level(ctrl_info, device);
			pqi_get_raid_bypass_status(ctrl_info, device);
			pqi_get_volume_status(ctrl_info, device);
		}
	}

	if (pqi_get_device_id(ctrl_info, device->scsi3addr,
		device->unique_id, sizeof(device->unique_id)) < 0)
		dev_warn(&ctrl_info->pci_dev->dev,
			"Can't get device id for scsi %d:%d:%d:%d\n",
			ctrl_info->scsi_host->host_no,
			device->bus, device->target,
			device->lun);

out:
	kfree(buffer);

	return rc;
}

static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	memset(id_phys, 0, sizeof(*id_phys));

	rc = pqi_identify_physical_device(ctrl_info, device,
		id_phys, sizeof(*id_phys));
	if (rc) {
		device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
		return;
	}

	device->queue_depth =
		get_unaligned_le16(&id_phys->current_queue_depth_limit);
	device->device_type = id_phys->device_type;
	device->active_path_index = id_phys->active_path_number;
	device->path_map = id_phys->redundant_path_present_map;
	memcpy(&device->box,
		&id_phys->alternate_paths_phys_box_on_port,
		sizeof(device->box));
	memcpy(&device->phys_connector,
		&id_phys->alternate_paths_phys_connector,
		sizeof(device->phys_connector));
	device->bay = id_phys->phys_bay_in_box;
}

static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	char *status;
	static const char unknown_state_str[] =
		"Volume is in an unknown state (%u)";
	char unknown_state_buffer[sizeof(unknown_state_str) + 10];

	switch (device->volume_status) {
	case CISS_LV_OK:
		status = "Volume online";
		break;
	case CISS_LV_FAILED:
		status = "Volume failed";
		break;
	case CISS_LV_NOT_CONFIGURED:
		status = "Volume not configured";
		break;
	case CISS_LV_DEGRADED:
		status = "Volume degraded";
		break;
	case CISS_LV_READY_FOR_RECOVERY:
		status = "Volume ready for recovery operation";
		break;
	case CISS_LV_UNDERGOING_RECOVERY:
		status = "Volume undergoing recovery";
		break;
	case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
		status = "Wrong physical drive was replaced";
		break;
	case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
		status = "A physical drive not properly connected";
		break;
	case CISS_LV_HARDWARE_OVERHEATING:
		status = "Hardware is overheating";
		break;
	case CISS_LV_HARDWARE_HAS_OVERHEATED:
		status = "Hardware has overheated";
		break;
	case CISS_LV_UNDERGOING_EXPANSION:
		status = "Volume undergoing expansion";
		break;
	case CISS_LV_NOT_AVAILABLE:
		status = "Volume waiting for transforming volume";
		break;
	case CISS_LV_QUEUED_FOR_EXPANSION:
		status = "Volume queued for expansion";
		break;
	case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
		status = "Volume disabled due to SCSI ID conflict";
		break;
	case CISS_LV_EJECTED:
		status = "Volume has been ejected";
		break;
	case CISS_LV_UNDERGOING_ERASE:
		status = "Volume undergoing background erase";
		break;
	case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
		status = "Volume ready for predictive spare rebuild";
		break;
	case CISS_LV_UNDERGOING_RPI:
		status = "Volume undergoing rapid parity initialization";
		break;
	case CISS_LV_PENDING_RPI:
		status = "Volume queued for rapid parity initialization";
		break;
	case CISS_LV_ENCRYPTED_NO_KEY:
		status = "Encrypted volume inaccessible - key not present";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION:
		status = "Volume undergoing encryption process";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
		status = "Volume undergoing encryption re-keying process";
		break;
	case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		status = "Volume encrypted but encryption is disabled";
		break;
	case CISS_LV_PENDING_ENCRYPTION:
		status = "Volume pending migration to encrypted state";
		break;
	case CISS_LV_PENDING_ENCRYPTION_REKEYING:
		status = "Volume pending encryption rekeying";
		break;
	case CISS_LV_NOT_SUPPORTED:
		status = "Volume not supported on this controller";
		break;
	case CISS_LV_STATUS_UNAVAILABLE:
		status = "Volume status not available";
		break;
	default:
		snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
			unknown_state_str, device->volume_status);
		status = unknown_state_buffer;
		break;
	}

	dev_info(&ctrl_info->pci_dev->dev,
		"scsi %d:%d:%d:%d %s\n",
		ctrl_info->scsi_host->host_no,
		device->bus, device->target, device->lun, status);
}

static void pqi_rescan_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		rescan_work);

	pqi_scan_scsi_devices(ctrl_info);
}

static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	if (pqi_is_logical_device(device))
		rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
			device->target, device->lun);
	else
		rc = pqi_add_sas_device(ctrl_info->sas_host, device);

	return rc;
}

#define PQI_PENDING_IO_TIMEOUT_SECS	20

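/*
 * Wait up to PQI_PENDING_IO_TIMEOUT_SECS for outstanding commands to drain
 * before removing the device from the SCSI ML / SAS transport.
 */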
static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	pqi_device_remove_start(device);

	rc = pqi_device_wait_for_pending_io(ctrl_info, device,
		PQI_PENDING_IO_TIMEOUT_SECS);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"scsi %d:%d:%d:%d removing device with %d outstanding commands\n",
			ctrl_info->scsi_host->host_no, device->bus,
			device->target, device->lun,
			atomic_read(&device->scsi_cmds_outstanding));

	if (pqi_is_logical_device(device))
		scsi_remove_device(device->sdev);
	else
		pqi_remove_sas_device(device);
}

/* Assumes the SCSI device list lock is held. */

static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
	int bus, int target, int lun)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry)
		if (device->bus == bus && device->target == target &&
			device->lun == lun)
			return device;

	return NULL;
}

static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
	struct pqi_scsi_dev *dev2)
{
	if (dev1->is_physical_device != dev2->is_physical_device)
		return false;

	if (dev1->is_physical_device)
		return dev1->wwid == dev2->wwid;

	return memcmp(dev1->volume_id, dev2->volume_id,
		sizeof(dev1->volume_id)) == 0;
}

enum pqi_find_result {
	DEVICE_NOT_FOUND,
	DEVICE_CHANGED,
	DEVICE_SAME,
};

static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device_to_find,
	struct pqi_scsi_dev **matching_device)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
			device->scsi3addr)) {
			*matching_device = device;
			if (pqi_device_equal(device_to_find, device)) {
				if (device_to_find->volume_offline)
					return DEVICE_CHANGED;
				return DEVICE_SAME;
			}
			return DEVICE_CHANGED;
		}
	}

	return DEVICE_NOT_FOUND;
}

#define PQI_DEV_INFO_BUFFER_LENGTH	128

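/*
 * Log a one-line summary of a device (B:T:L, address, type, vendor/model,
 * and RAID-bypass or AIO state) prefixed by the given action string.
 */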
Kevin Barnett6c223762016-06-27 16:41:00 -05001587static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
1588 char *action, struct pqi_scsi_dev *device)
1589{
Kevin Barnett6de783f2017-05-03 18:55:19 -05001590 ssize_t count;
1591 char buffer[PQI_DEV_INFO_BUFFER_LENGTH];
1592
1593 count = snprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
1594 "%d:%d:", ctrl_info->scsi_host->host_no, device->bus);
1595
1596 if (device->target_lun_valid)
1597 count += snprintf(buffer + count,
1598 PQI_DEV_INFO_BUFFER_LENGTH - count,
1599 "%d:%d",
1600 device->target,
1601 device->lun);
1602 else
1603 count += snprintf(buffer + count,
1604 PQI_DEV_INFO_BUFFER_LENGTH - count,
1605 "-:-");
1606
1607 if (pqi_is_logical_device(device))
1608 count += snprintf(buffer + count,
1609 PQI_DEV_INFO_BUFFER_LENGTH - count,
1610 " %08x%08x",
1611 *((u32 *)&device->scsi3addr),
1612 *((u32 *)&device->scsi3addr[4]));
1613 else
1614 count += snprintf(buffer + count,
1615 PQI_DEV_INFO_BUFFER_LENGTH - count,
1616 " %016llx", device->sas_address);
1617
1618 count += snprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
1619 " %s %.8s %.16s ",
Kevin Barnett6c223762016-06-27 16:41:00 -05001620 scsi_device_type(device->devtype),
1621 device->vendor,
Kevin Barnett6de783f2017-05-03 18:55:19 -05001622 device->model);
1623
1624 if (pqi_is_logical_device(device)) {
1625 if (device->devtype == TYPE_DISK)
1626 count += snprintf(buffer + count,
1627 PQI_DEV_INFO_BUFFER_LENGTH - count,
1628 "SSDSmartPathCap%c En%c %-12s",
Kevin Barnett588a63fe2017-05-03 18:55:25 -05001629 device->raid_bypass_configured ? '+' : '-',
1630 device->raid_bypass_enabled ? '+' : '-',
Kevin Barnett6de783f2017-05-03 18:55:19 -05001631 pqi_raid_level_to_string(device->raid_level));
1632 } else {
1633 count += snprintf(buffer + count,
1634 PQI_DEV_INFO_BUFFER_LENGTH - count,
1635 "AIO%c", device->aio_enabled ? '+' : '-');
1636 if (device->devtype == TYPE_DISK ||
1637 device->devtype == TYPE_ZBC)
1638 count += snprintf(buffer + count,
1639 PQI_DEV_INFO_BUFFER_LENGTH - count,
1640 " qd=%-6d", device->queue_depth);
1641 }
1642
1643 dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
Kevin Barnett6c223762016-06-27 16:41:00 -05001644}
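
pqi_dev_info() builds its one-line device description by appending pieces with snprintf() and carrying the running length forward, assuming (as the driver does) that the pieces always fit in the buffer. A standalone sketch of that append idiom, with made-up values in place of the real host/bus/target/lun and model fields:

#include <stdio.h>

#define INFO_LEN 128

int main(void)
{
	char buf[INFO_LEN];
	int count;

	count = snprintf(buf, INFO_LEN, "%d:%d:", 0, 2);
	count += snprintf(buf + count, INFO_LEN - count, "%d:%d", 1, 0);
	count += snprintf(buf + count, INFO_LEN - count, " %s qd=%-6d",
			  "Direct-Access", 64);
	/* Each append starts at buf + count with INFO_LEN - count left;
	 * this assumes no truncation, as the driver's buffer sizing does. */
	printf("added %s\n", buf);
	return 0;
}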
1645
1646/* Assumes the SCSI device list lock is held. */
1647
1648static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
1649 struct pqi_scsi_dev *new_device)
1650{
1651 existing_device->devtype = new_device->devtype;
1652 existing_device->device_type = new_device->device_type;
1653 existing_device->bus = new_device->bus;
1654 if (new_device->target_lun_valid) {
1655 existing_device->target = new_device->target;
1656 existing_device->lun = new_device->lun;
1657 existing_device->target_lun_valid = true;
1658 }
1659
1660 /* By definition, the scsi3addr and wwid fields are already the same. */
1661
1662 existing_device->is_physical_device = new_device->is_physical_device;
Kevin Barnettbd10cf02017-05-03 18:54:12 -05001663 existing_device->is_external_raid_device =
1664 new_device->is_external_raid_device;
Kevin Barnett6c223762016-06-27 16:41:00 -05001665 existing_device->aio_enabled = new_device->aio_enabled;
1666 memcpy(existing_device->vendor, new_device->vendor,
1667 sizeof(existing_device->vendor));
1668 memcpy(existing_device->model, new_device->model,
1669 sizeof(existing_device->model));
1670 existing_device->sas_address = new_device->sas_address;
1671 existing_device->raid_level = new_device->raid_level;
1672 existing_device->queue_depth = new_device->queue_depth;
1673 existing_device->aio_handle = new_device->aio_handle;
1674 existing_device->volume_status = new_device->volume_status;
1675 existing_device->active_path_index = new_device->active_path_index;
1676 existing_device->path_map = new_device->path_map;
1677 existing_device->bay = new_device->bay;
1678 memcpy(existing_device->box, new_device->box,
1679 sizeof(existing_device->box));
1680 memcpy(existing_device->phys_connector, new_device->phys_connector,
1681 sizeof(existing_device->phys_connector));
Kevin Barnett6c223762016-06-27 16:41:00 -05001682 existing_device->offload_to_mirror = 0;
1683 kfree(existing_device->raid_map);
1684 existing_device->raid_map = new_device->raid_map;
Kevin Barnett588a63fe2017-05-03 18:55:25 -05001685 existing_device->raid_bypass_configured =
1686 new_device->raid_bypass_configured;
1687 existing_device->raid_bypass_enabled =
1688 new_device->raid_bypass_enabled;
Dave Carrolla9a68102018-12-07 16:29:37 -06001689 existing_device->device_offline = false;
Kevin Barnett6c223762016-06-27 16:41:00 -05001690
1691	/* Keep the raid_map (now owned by existing_device) from being freed later. */

1692 new_device->raid_map = NULL;
1693}
1694
1695static inline void pqi_free_device(struct pqi_scsi_dev *device)
1696{
1697 if (device) {
1698 kfree(device->raid_map);
1699 kfree(device);
1700 }
1701}
1702
1703/*
1704 * Called when exposing a new device to the OS fails, so that our internal
1705 * SCSI device list can be re-adjusted to match the SCSI ML's view.
1706 */
1707
1708static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
1709 struct pqi_scsi_dev *device)
1710{
1711 unsigned long flags;
1712
1713 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1714 list_del(&device->scsi_device_list_entry);
1715 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1716
1717 /* Allow the device structure to be freed later. */
1718 device->keep_device = false;
1719}
1720
1721static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
1722 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
1723{
1724 int rc;
1725 unsigned int i;
1726 unsigned long flags;
1727 enum pqi_find_result find_result;
1728 struct pqi_scsi_dev *device;
1729 struct pqi_scsi_dev *next;
1730 struct pqi_scsi_dev *matching_device;
Kevin Barnett8a994a02017-05-03 18:55:37 -05001731 LIST_HEAD(add_list);
1732 LIST_HEAD(delete_list);
Kevin Barnett6c223762016-06-27 16:41:00 -05001733
1734 /*
1735 * The idea here is to do as little work as possible while holding the
1736 * spinlock. That's why we go to great pains to defer anything other
1737 * than updating the internal device list until after we release the
1738 * spinlock.
1739 */
1740
1741 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1742
1743 /* Assume that all devices in the existing list have gone away. */
1744 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1745 scsi_device_list_entry)
1746 device->device_gone = true;
1747
1748 for (i = 0; i < num_new_devices; i++) {
1749 device = new_device_list[i];
1750
1751 find_result = pqi_scsi_find_entry(ctrl_info, device,
1752 &matching_device);
1753
1754 switch (find_result) {
1755 case DEVICE_SAME:
1756 /*
1757 * The newly found device is already in the existing
1758 * device list.
1759 */
1760 device->new_device = false;
1761 matching_device->device_gone = false;
1762 pqi_scsi_update_device(matching_device, device);
1763 break;
1764 case DEVICE_NOT_FOUND:
1765 /*
1766 * The newly found device is NOT in the existing device
1767 * list.
1768 */
1769 device->new_device = true;
1770 break;
1771 case DEVICE_CHANGED:
1772 /*
1773 * The original device has gone away and we need to add
1774 * the new device.
1775 */
1776 device->new_device = true;
1777 break;
Kevin Barnett6c223762016-06-27 16:41:00 -05001778 }
1779 }
1780
1781 /* Process all devices that have gone away. */
1782 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
1783 scsi_device_list_entry) {
1784 if (device->device_gone) {
1785 list_del(&device->scsi_device_list_entry);
1786 list_add_tail(&device->delete_list_entry, &delete_list);
1787 }
1788 }
1789
1790 /* Process all new devices. */
1791 for (i = 0; i < num_new_devices; i++) {
1792 device = new_device_list[i];
1793 if (!device->new_device)
1794 continue;
1795 if (device->volume_offline)
1796 continue;
1797 list_add_tail(&device->scsi_device_list_entry,
1798 &ctrl_info->scsi_device_list);
1799 list_add_tail(&device->add_list_entry, &add_list);
1800 /* To prevent this device structure from being freed later. */
1801 device->keep_device = true;
1802 }
1803
Kevin Barnett6c223762016-06-27 16:41:00 -05001804 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1805
1806 /* Remove all devices that have gone away. */
1807 list_for_each_entry_safe(device, next, &delete_list,
1808 delete_list_entry) {
Kevin Barnett6c223762016-06-27 16:41:00 -05001809 if (device->volume_offline) {
1810 pqi_dev_info(ctrl_info, "offline", device);
1811 pqi_show_volume_status(ctrl_info, device);
1812 } else {
1813 pqi_dev_info(ctrl_info, "removed", device);
1814 }
Kevin Barnett6de783f2017-05-03 18:55:19 -05001815 if (device->sdev)
1816 pqi_remove_device(ctrl_info, device);
Kevin Barnett6c223762016-06-27 16:41:00 -05001817 list_del(&device->delete_list_entry);
1818 pqi_free_device(device);
1819 }
1820
1821 /*
1822 * Notify the SCSI ML if the queue depth of any existing device has
1823 * changed.
1824 */
1825 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1826 scsi_device_list_entry) {
1827 if (device->sdev && device->queue_depth !=
1828 device->advertised_queue_depth) {
1829 device->advertised_queue_depth = device->queue_depth;
1830 scsi_change_queue_depth(device->sdev,
1831 device->advertised_queue_depth);
1832 }
1833 }
1834
1835 /* Expose any new devices. */
1836 list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
Kevin Barnett94086f52017-05-03 18:54:31 -05001837 if (!device->sdev) {
Kevin Barnett6de783f2017-05-03 18:55:19 -05001838 pqi_dev_info(ctrl_info, "added", device);
Kevin Barnett6c223762016-06-27 16:41:00 -05001839 rc = pqi_add_device(ctrl_info, device);
1840 if (rc) {
1841 dev_warn(&ctrl_info->pci_dev->dev,
1842 "scsi %d:%d:%d:%d addition failed, device not added\n",
1843 ctrl_info->scsi_host->host_no,
1844 device->bus, device->target,
1845 device->lun);
1846 pqi_fixup_botched_add(ctrl_info, device);
Kevin Barnett6c223762016-06-27 16:41:00 -05001847 }
1848 }
Kevin Barnett6c223762016-06-27 16:41:00 -05001849 }
1850}
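
pqi_update_device_list() follows a mark-and-sweep pattern: under the spinlock it marks every known device as gone, clears the mark for anything rediscovered, and queues additions and removals; the SCSI midlayer calls happen only after the lock is dropped. A much-reduced userspace sketch of just that bookkeeping (integer IDs stand in for devices, and no locking or midlayer interaction is shown):

#include <stdbool.h>
#include <stdio.h>

#define MAX_DEVS 8

struct dev { int id; bool present; bool gone; };

static void reconcile(struct dev *existing, int n_existing,
		      const int *found, int n_found)
{
	/* Phase 1: assume every known device has gone away. */
	for (int i = 0; i < n_existing; i++)
		if (existing[i].present)
			existing[i].gone = true;

	/* Phase 2: rediscovered devices are not gone; new ones are added. */
	for (int i = 0; i < n_found; i++) {
		bool known = false;

		for (int j = 0; j < n_existing; j++) {
			if (existing[j].present && existing[j].id == found[i]) {
				existing[j].gone = false;
				known = true;
				break;
			}
		}
		if (known)
			continue;
		for (int j = 0; j < n_existing; j++) {
			if (!existing[j].present) {
				existing[j].present = true;
				existing[j].gone = false;
				existing[j].id = found[i];
				printf("added %d\n", found[i]);
				break;
			}
		}
	}

	/* Phase 3: remove whatever is still marked gone. */
	for (int i = 0; i < n_existing; i++) {
		if (existing[i].present && existing[i].gone) {
			existing[i].present = false;
			printf("removed %d\n", existing[i].id);
		}
	}
}

int main(void)
{
	struct dev devs[MAX_DEVS] = { { .id = 1, .present = true },
				      { .id = 2, .present = true } };
	int found[] = { 2, 3 };

	reconcile(devs, MAX_DEVS, found, 2);	/* prints: added 3, removed 1 */
	return 0;
}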
1851
1852static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
1853{
1854 bool is_supported = false;
1855
1856 switch (device->devtype) {
1857 case TYPE_DISK:
1858 case TYPE_ZBC:
1859 case TYPE_TAPE:
1860 case TYPE_MEDIUM_CHANGER:
1861 case TYPE_ENCLOSURE:
1862 is_supported = true;
1863 break;
1864 case TYPE_RAID:
1865 /*
1866 * Only support the HBA controller itself as a RAID
1867 * controller. If it's a RAID controller other than
Kevin Barnett376fb882017-05-03 18:54:43 -05001868 * the HBA itself (an external RAID controller, for
1869 * example), we don't support it.
Kevin Barnett6c223762016-06-27 16:41:00 -05001870 */
1871 if (pqi_is_hba_lunid(device->scsi3addr))
1872 is_supported = true;
1873 break;
1874 }
1875
1876 return is_supported;
1877}
1878
Kevin Barnett94086f52017-05-03 18:54:31 -05001879static inline bool pqi_skip_device(u8 *scsi3addr)
Kevin Barnett6c223762016-06-27 16:41:00 -05001880{
Kevin Barnett94086f52017-05-03 18:54:31 -05001881 /* Ignore all masked devices. */
1882 if (MASKED_DEVICE(scsi3addr))
Kevin Barnett6c223762016-06-27 16:41:00 -05001883 return true;
Kevin Barnett6c223762016-06-27 16:41:00 -05001884
1885 return false;
1886}
1887
Dave Carrollcd128242018-12-07 16:28:47 -06001888static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
1889{
1890 return !device->is_physical_device ||
1891 !pqi_skip_device(device->scsi3addr);
1892}
1893
Kevin Barnett6c223762016-06-27 16:41:00 -05001894static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1895{
1896 int i;
1897 int rc;
Kevin Barnett8a994a02017-05-03 18:55:37 -05001898 LIST_HEAD(new_device_list_head);
Kevin Barnett6c223762016-06-27 16:41:00 -05001899 struct report_phys_lun_extended *physdev_list = NULL;
1900 struct report_log_lun_extended *logdev_list = NULL;
1901 struct report_phys_lun_extended_entry *phys_lun_ext_entry;
1902 struct report_log_lun_extended_entry *log_lun_ext_entry;
1903 struct bmic_identify_physical_device *id_phys = NULL;
1904 u32 num_physicals;
1905 u32 num_logicals;
1906 struct pqi_scsi_dev **new_device_list = NULL;
1907 struct pqi_scsi_dev *device;
1908 struct pqi_scsi_dev *next;
1909 unsigned int num_new_devices;
1910 unsigned int num_valid_devices;
1911 bool is_physical_device;
1912 u8 *scsi3addr;
1913 static char *out_of_memory_msg =
Kevin Barnett6de783f2017-05-03 18:55:19 -05001914 "failed to allocate memory, device discovery stopped";
Kevin Barnett6c223762016-06-27 16:41:00 -05001915
Kevin Barnett6c223762016-06-27 16:41:00 -05001916 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
1917 if (rc)
1918 goto out;
1919
1920 if (physdev_list)
1921 num_physicals =
1922 get_unaligned_be32(&physdev_list->header.list_length)
1923 / sizeof(physdev_list->lun_entries[0]);
1924 else
1925 num_physicals = 0;
1926
1927 if (logdev_list)
1928 num_logicals =
1929 get_unaligned_be32(&logdev_list->header.list_length)
1930 / sizeof(logdev_list->lun_entries[0]);
1931 else
1932 num_logicals = 0;
1933
1934 if (num_physicals) {
1935 /*
1936 * We need this buffer for calls to pqi_get_physical_disk_info()
1937 * below. We allocate it here instead of inside
1938 * pqi_get_physical_disk_info() because it's a fairly large
1939 * buffer.
1940 */
1941 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
1942 if (!id_phys) {
1943 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1944 out_of_memory_msg);
1945 rc = -ENOMEM;
1946 goto out;
1947 }
1948 }
1949
1950 num_new_devices = num_physicals + num_logicals;
1951
Kees Cook6da2ec52018-06-12 13:55:00 -07001952 new_device_list = kmalloc_array(num_new_devices,
1953 sizeof(*new_device_list),
1954 GFP_KERNEL);
Kevin Barnett6c223762016-06-27 16:41:00 -05001955 if (!new_device_list) {
1956 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
1957 rc = -ENOMEM;
1958 goto out;
1959 }
1960
1961 for (i = 0; i < num_new_devices; i++) {
1962 device = kzalloc(sizeof(*device), GFP_KERNEL);
1963 if (!device) {
1964 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1965 out_of_memory_msg);
1966 rc = -ENOMEM;
1967 goto out;
1968 }
1969 list_add_tail(&device->new_device_list_entry,
1970 &new_device_list_head);
1971 }
1972
1973 device = NULL;
1974 num_valid_devices = 0;
1975
1976 for (i = 0; i < num_new_devices; i++) {
1977
1978 if (i < num_physicals) {
1979 is_physical_device = true;
1980 phys_lun_ext_entry = &physdev_list->lun_entries[i];
1981 log_lun_ext_entry = NULL;
1982 scsi3addr = phys_lun_ext_entry->lunid;
1983 } else {
1984 is_physical_device = false;
1985 phys_lun_ext_entry = NULL;
1986 log_lun_ext_entry =
1987 &logdev_list->lun_entries[i - num_physicals];
1988 scsi3addr = log_lun_ext_entry->lunid;
1989 }
1990
Kevin Barnett94086f52017-05-03 18:54:31 -05001991 if (is_physical_device && pqi_skip_device(scsi3addr))
Kevin Barnett6c223762016-06-27 16:41:00 -05001992 continue;
1993
1994 if (device)
1995 device = list_next_entry(device, new_device_list_entry);
1996 else
1997 device = list_first_entry(&new_device_list_head,
1998 struct pqi_scsi_dev, new_device_list_entry);
1999
2000 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
2001 device->is_physical_device = is_physical_device;
Kevin Barnettbd10cf02017-05-03 18:54:12 -05002002 if (!is_physical_device)
2003 device->is_external_raid_device =
2004 pqi_is_external_raid_addr(scsi3addr);
Kevin Barnett6c223762016-06-27 16:41:00 -05002005
2006 /* Gather information about the device. */
2007 rc = pqi_get_device_info(ctrl_info, device);
2008 if (rc == -ENOMEM) {
2009 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2010 out_of_memory_msg);
2011 goto out;
2012 }
2013 if (rc) {
Kevin Barnett6de783f2017-05-03 18:55:19 -05002014 if (device->is_physical_device)
2015 dev_warn(&ctrl_info->pci_dev->dev,
2016 "obtaining device info failed, skipping physical device %016llx\n",
2017 get_unaligned_be64(
2018 &phys_lun_ext_entry->wwid));
2019 else
2020 dev_warn(&ctrl_info->pci_dev->dev,
2021 "obtaining device info failed, skipping logical device %08x%08x\n",
2022 *((u32 *)&device->scsi3addr),
2023 *((u32 *)&device->scsi3addr[4]));
Kevin Barnett6c223762016-06-27 16:41:00 -05002024 rc = 0;
2025 continue;
2026 }
2027
2028 if (!pqi_is_supported_device(device))
2029 continue;
2030
2031 pqi_assign_bus_target_lun(device);
2032
Kevin Barnett6c223762016-06-27 16:41:00 -05002033 if (device->is_physical_device) {
2034 device->wwid = phys_lun_ext_entry->wwid;
2035 if ((phys_lun_ext_entry->device_flags &
2036 REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
2037 phys_lun_ext_entry->aio_handle)
2038 device->aio_enabled = true;
2039 } else {
2040 memcpy(device->volume_id, log_lun_ext_entry->volume_id,
2041 sizeof(device->volume_id));
2042 }
2043
2044 switch (device->devtype) {
2045 case TYPE_DISK:
2046 case TYPE_ZBC:
2047 case TYPE_ENCLOSURE:
2048 if (device->is_physical_device) {
2049 device->sas_address =
2050 get_unaligned_be64(&device->wwid);
2051 if (device->devtype == TYPE_DISK ||
2052 device->devtype == TYPE_ZBC) {
2053 device->aio_handle =
2054 phys_lun_ext_entry->aio_handle;
2055 pqi_get_physical_disk_info(ctrl_info,
2056 device, id_phys);
2057 }
2058 }
2059 break;
2060 }
2061
2062 new_device_list[num_valid_devices++] = device;
2063 }
2064
2065 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
2066
2067out:
2068 list_for_each_entry_safe(device, next, &new_device_list_head,
2069 new_device_list_entry) {
2070 if (device->keep_device)
2071 continue;
2072 list_del(&device->new_device_list_entry);
2073 pqi_free_device(device);
2074 }
2075
2076 kfree(new_device_list);
2077 kfree(physdev_list);
2078 kfree(logdev_list);
2079 kfree(id_phys);
2080
2081 return rc;
2082}
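
The physical and logical entry counts above come from dividing a big-endian list_length header field by the per-entry size. A small sketch of that calculation with a hypothetical 24-byte entry size (the real sizes come from the report_phys/report_log structures):

#include <stdint.h>
#include <stdio.h>

#define ENTRY_SIZE 24	/* hypothetical; illustrative only */

static uint32_t be32_to_host(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

int main(void)
{
	uint8_t list_length[4] = { 0x00, 0x00, 0x00, 0x48 };	/* 72 bytes */
	uint32_t num_entries = be32_to_host(list_length) / ENTRY_SIZE;

	printf("%u entries\n", num_entries);	/* 3 */
	return 0;
}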
2083
2084static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2085{
2086 unsigned long flags;
2087 struct pqi_scsi_dev *device;
Kevin Barnett6c223762016-06-27 16:41:00 -05002088
Kevin Barnetta37ef742017-05-03 18:52:22 -05002089 while (1) {
2090 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
Kevin Barnett6c223762016-06-27 16:41:00 -05002091
Kevin Barnetta37ef742017-05-03 18:52:22 -05002092 device = list_first_entry_or_null(&ctrl_info->scsi_device_list,
2093 struct pqi_scsi_dev, scsi_device_list_entry);
2094 if (device)
2095 list_del(&device->scsi_device_list_entry);
2096
2097 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
2098 flags);
2099
2100 if (!device)
2101 break;
2102
Kevin Barnett6c223762016-06-27 16:41:00 -05002103 if (device->sdev)
2104 pqi_remove_device(ctrl_info, device);
Kevin Barnett6c223762016-06-27 16:41:00 -05002105 pqi_free_device(device);
2106 }
Kevin Barnett6c223762016-06-27 16:41:00 -05002107}
2108
2109static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2110{
2111 int rc;
2112
2113 if (pqi_ctrl_offline(ctrl_info))
2114 return -ENXIO;
2115
2116 mutex_lock(&ctrl_info->scan_mutex);
2117
2118 rc = pqi_update_scsi_devices(ctrl_info);
2119 if (rc)
Kevin Barnett5f310422017-05-03 18:54:55 -05002120 pqi_schedule_rescan_worker_delayed(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05002121
2122 mutex_unlock(&ctrl_info->scan_mutex);
2123
2124 return rc;
2125}
2126
2127static void pqi_scan_start(struct Scsi_Host *shost)
2128{
2129 pqi_scan_scsi_devices(shost_to_hba(shost));
2130}
2131
2132/* Returns TRUE if scan is finished. */
2133
2134static int pqi_scan_finished(struct Scsi_Host *shost,
2135 unsigned long elapsed_time)
2136{
2137 struct pqi_ctrl_info *ctrl_info;
2138
2139 ctrl_info = shost_priv(shost);
2140
2141 return !mutex_is_locked(&ctrl_info->scan_mutex);
2142}
2143
Kevin Barnett061ef062017-05-03 18:53:05 -05002144static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info)
2145{
2146 mutex_lock(&ctrl_info->scan_mutex);
2147 mutex_unlock(&ctrl_info->scan_mutex);
2148}
2149
2150static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info)
2151{
2152 mutex_lock(&ctrl_info->lun_reset_mutex);
2153 mutex_unlock(&ctrl_info->lun_reset_mutex);
2154}
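
pqi_wait_until_scan_finished() and pqi_wait_until_lun_reset_finished() use a common idiom: taking and immediately releasing a mutex blocks the caller until whoever currently holds it has finished. A pthread sketch of the same idea, illustrative only:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t scan_mutex = PTHREAD_MUTEX_INITIALIZER;

static void *scan_worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&scan_mutex);
	sleep(1);				/* pretend to scan */
	pthread_mutex_unlock(&scan_mutex);
	return NULL;
}

/* Blocks until any in-progress "scan" has finished. */
static void wait_until_scan_finished(void)
{
	pthread_mutex_lock(&scan_mutex);
	pthread_mutex_unlock(&scan_mutex);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, scan_worker, NULL);
	usleep(100 * 1000);			/* let the worker take the lock */
	wait_until_scan_finished();
	printf("scan finished\n");
	pthread_join(&t, NULL);
	return 0;
}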
2155
Kevin Barnett6c223762016-06-27 16:41:00 -05002156static inline void pqi_set_encryption_info(
2157 struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
2158 u64 first_block)
2159{
2160 u32 volume_blk_size;
2161
2162 /*
2163 * Set the encryption tweak values based on logical block address.
2164 * If the block size is 512, the tweak value is equal to the LBA.
2165 * For other block sizes, the tweak value is (LBA * block size) / 512.
2166 */
2167 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
2168 if (volume_blk_size != 512)
2169 first_block = (first_block * volume_blk_size) / 512;
2170
2171 encryption_info->data_encryption_key_index =
2172 get_unaligned_le16(&raid_map->data_encryption_key_index);
2173 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
2174 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
2175}
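
The tweak computation normalizes the starting LBA to 512-byte units before splitting it into two 32-bit halves. A standalone version of just that arithmetic:

#include <stdint.h>
#include <stdio.h>

static void set_tweak(uint64_t first_block, uint32_t volume_blk_size,
		      uint32_t *tweak_lower, uint32_t *tweak_upper)
{
	/* For a 512-byte block size the tweak is the LBA itself;
	 * otherwise scale the LBA to 512-byte units first. */
	if (volume_blk_size != 512)
		first_block = (first_block * volume_blk_size) / 512;

	*tweak_lower = (uint32_t)first_block;
	*tweak_upper = (uint32_t)(first_block >> 32);
}

int main(void)
{
	uint32_t lo, hi;

	set_tweak(1000, 4096, &lo, &hi);	/* 1000 * 4096 / 512 = 8000 */
	printf("tweak = %u:%u\n", hi, lo);
	return 0;
}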
2176
2177/*
Kevin Barnett588a63fe2017-05-03 18:55:25 -05002178 * Attempt to perform RAID bypass mapping for a logical volume I/O.
Kevin Barnett6c223762016-06-27 16:41:00 -05002179 */
2180
2181#define PQI_RAID_BYPASS_INELIGIBLE 1
2182
2183static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
2184 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
2185 struct pqi_queue_group *queue_group)
2186{
2187 struct raid_map *raid_map;
2188 bool is_write = false;
2189 u32 map_index;
2190 u64 first_block;
2191 u64 last_block;
2192 u32 block_cnt;
2193 u32 blocks_per_row;
2194 u64 first_row;
2195 u64 last_row;
2196 u32 first_row_offset;
2197 u32 last_row_offset;
2198 u32 first_column;
2199 u32 last_column;
2200 u64 r0_first_row;
2201 u64 r0_last_row;
2202 u32 r5or6_blocks_per_row;
2203 u64 r5or6_first_row;
2204 u64 r5or6_last_row;
2205 u32 r5or6_first_row_offset;
2206 u32 r5or6_last_row_offset;
2207 u32 r5or6_first_column;
2208 u32 r5or6_last_column;
2209 u16 data_disks_per_row;
2210 u32 total_disks_per_row;
2211 u16 layout_map_count;
2212 u32 stripesize;
2213 u16 strip_size;
2214 u32 first_group;
2215 u32 last_group;
2216 u32 current_group;
2217 u32 map_row;
2218 u32 aio_handle;
2219 u64 disk_block;
2220 u32 disk_block_cnt;
2221 u8 cdb[16];
2222 u8 cdb_length;
2223 int offload_to_mirror;
2224 struct pqi_encryption_info *encryption_info_ptr;
2225 struct pqi_encryption_info encryption_info;
2226#if BITS_PER_LONG == 32
2227 u64 tmpdiv;
2228#endif
2229
2230 /* Check for valid opcode, get LBA and block count. */
2231 switch (scmd->cmnd[0]) {
2232 case WRITE_6:
2233 is_write = true;
2234 /* fall through */
2235 case READ_6:
kevin Barnette018ef52016-09-16 15:01:51 -05002236 first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
2237 (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
Kevin Barnett6c223762016-06-27 16:41:00 -05002238 block_cnt = (u32)scmd->cmnd[4];
2239 if (block_cnt == 0)
2240 block_cnt = 256;
2241 break;
2242 case WRITE_10:
2243 is_write = true;
2244 /* fall through */
2245 case READ_10:
2246 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2247 block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
2248 break;
2249 case WRITE_12:
2250 is_write = true;
2251 /* fall through */
2252 case READ_12:
2253 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2254 block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
2255 break;
2256 case WRITE_16:
2257 is_write = true;
2258 /* fall through */
2259 case READ_16:
2260 first_block = get_unaligned_be64(&scmd->cmnd[2]);
2261 block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
2262 break;
2263 default:
2264 /* Process via normal I/O path. */
2265 return PQI_RAID_BYPASS_INELIGIBLE;
2266 }
2267
2268 /* Check for write to non-RAID-0. */
2269 if (is_write && device->raid_level != SA_RAID_0)
2270 return PQI_RAID_BYPASS_INELIGIBLE;
2271
2272 if (unlikely(block_cnt == 0))
2273 return PQI_RAID_BYPASS_INELIGIBLE;
2274
2275 last_block = first_block + block_cnt - 1;
2276 raid_map = device->raid_map;
2277
2278 /* Check for invalid block or wraparound. */
2279 if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2280 last_block < first_block)
2281 return PQI_RAID_BYPASS_INELIGIBLE;
2282
2283 data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
2284 strip_size = get_unaligned_le16(&raid_map->strip_size);
2285 layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2286
2287 /* Calculate stripe information for the request. */
2288 blocks_per_row = data_disks_per_row * strip_size;
2289#if BITS_PER_LONG == 32
2290 tmpdiv = first_block;
2291 do_div(tmpdiv, blocks_per_row);
2292 first_row = tmpdiv;
2293 tmpdiv = last_block;
2294 do_div(tmpdiv, blocks_per_row);
2295 last_row = tmpdiv;
2296 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2297 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2298 tmpdiv = first_row_offset;
2299 do_div(tmpdiv, strip_size);
2300 first_column = tmpdiv;
2301 tmpdiv = last_row_offset;
2302 do_div(tmpdiv, strip_size);
2303 last_column = tmpdiv;
2304#else
2305 first_row = first_block / blocks_per_row;
2306 last_row = last_block / blocks_per_row;
2307 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2308 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2309 first_column = first_row_offset / strip_size;
2310 last_column = last_row_offset / strip_size;
2311#endif
2312
2313	/* If this isn't a single row/column, hand the request to the controller. */
2314 if (first_row != last_row || first_column != last_column)
2315 return PQI_RAID_BYPASS_INELIGIBLE;
2316
2317 /* Proceeding with driver mapping. */
2318 total_disks_per_row = data_disks_per_row +
2319 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2320 map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2321 get_unaligned_le16(&raid_map->row_cnt);
2322 map_index = (map_row * total_disks_per_row) + first_column;
2323
2324 /* RAID 1 */
2325 if (device->raid_level == SA_RAID_1) {
2326 if (device->offload_to_mirror)
2327 map_index += data_disks_per_row;
2328 device->offload_to_mirror = !device->offload_to_mirror;
2329 } else if (device->raid_level == SA_RAID_ADM) {
2330 /* RAID ADM */
2331 /*
2332 * Handles N-way mirrors (R1-ADM) and R10 with # of drives
2333 * divisible by 3.
2334 */
2335 offload_to_mirror = device->offload_to_mirror;
2336 if (offload_to_mirror == 0) {
2337 /* use physical disk in the first mirrored group. */
2338 map_index %= data_disks_per_row;
2339 } else {
2340 do {
2341 /*
2342 * Determine mirror group that map_index
2343 * indicates.
2344 */
2345 current_group = map_index / data_disks_per_row;
2346
2347 if (offload_to_mirror != current_group) {
2348 if (current_group <
2349 layout_map_count - 1) {
2350 /*
2351 * Select raid index from
2352 * next group.
2353 */
2354 map_index += data_disks_per_row;
2355 current_group++;
2356 } else {
2357 /*
2358 * Select raid index from first
2359 * group.
2360 */
2361 map_index %= data_disks_per_row;
2362 current_group = 0;
2363 }
2364 }
2365 } while (offload_to_mirror != current_group);
2366 }
2367
2368 /* Set mirror group to use next time. */
2369 offload_to_mirror =
2370 (offload_to_mirror >= layout_map_count - 1) ?
2371 0 : offload_to_mirror + 1;
2372 WARN_ON(offload_to_mirror >= layout_map_count);
2373 device->offload_to_mirror = offload_to_mirror;
2374 /*
2375 * Avoid direct use of device->offload_to_mirror within this
2376 * function since multiple threads might simultaneously
2377 * increment it beyond the range of device->layout_map_count -1.
2378 */
2379 } else if ((device->raid_level == SA_RAID_5 ||
2380 device->raid_level == SA_RAID_6) && layout_map_count > 1) {
2381 /* RAID 50/60 */
2382 /* Verify first and last block are in same RAID group */
2383 r5or6_blocks_per_row = strip_size * data_disks_per_row;
2384 stripesize = r5or6_blocks_per_row * layout_map_count;
2385#if BITS_PER_LONG == 32
2386 tmpdiv = first_block;
2387 first_group = do_div(tmpdiv, stripesize);
2388 tmpdiv = first_group;
2389 do_div(tmpdiv, r5or6_blocks_per_row);
2390 first_group = tmpdiv;
2391 tmpdiv = last_block;
2392 last_group = do_div(tmpdiv, stripesize);
2393 tmpdiv = last_group;
2394 do_div(tmpdiv, r5or6_blocks_per_row);
2395 last_group = tmpdiv;
2396#else
2397 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
2398 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
2399#endif
2400 if (first_group != last_group)
2401 return PQI_RAID_BYPASS_INELIGIBLE;
2402
2403 /* Verify request is in a single row of RAID 5/6 */
2404#if BITS_PER_LONG == 32
2405 tmpdiv = first_block;
2406 do_div(tmpdiv, stripesize);
2407 first_row = r5or6_first_row = r0_first_row = tmpdiv;
2408 tmpdiv = last_block;
2409 do_div(tmpdiv, stripesize);
2410 r5or6_last_row = r0_last_row = tmpdiv;
2411#else
2412 first_row = r5or6_first_row = r0_first_row =
2413 first_block / stripesize;
2414 r5or6_last_row = r0_last_row = last_block / stripesize;
2415#endif
2416 if (r5or6_first_row != r5or6_last_row)
2417 return PQI_RAID_BYPASS_INELIGIBLE;
2418
2419 /* Verify request is in a single column */
2420#if BITS_PER_LONG == 32
2421 tmpdiv = first_block;
2422 first_row_offset = do_div(tmpdiv, stripesize);
2423 tmpdiv = first_row_offset;
2424 first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
2425 r5or6_first_row_offset = first_row_offset;
2426 tmpdiv = last_block;
2427 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
2428 tmpdiv = r5or6_last_row_offset;
2429 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
2430 tmpdiv = r5or6_first_row_offset;
2431 do_div(tmpdiv, strip_size);
2432 first_column = r5or6_first_column = tmpdiv;
2433 tmpdiv = r5or6_last_row_offset;
2434 do_div(tmpdiv, strip_size);
2435 r5or6_last_column = tmpdiv;
2436#else
2437 first_row_offset = r5or6_first_row_offset =
2438 (u32)((first_block % stripesize) %
2439 r5or6_blocks_per_row);
2440
2441 r5or6_last_row_offset =
2442 (u32)((last_block % stripesize) %
2443 r5or6_blocks_per_row);
2444
2445 first_column = r5or6_first_row_offset / strip_size;
2446 r5or6_first_column = first_column;
2447 r5or6_last_column = r5or6_last_row_offset / strip_size;
2448#endif
2449 if (r5or6_first_column != r5or6_last_column)
2450 return PQI_RAID_BYPASS_INELIGIBLE;
2451
2452 /* Request is eligible */
2453 map_row =
2454 ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2455 get_unaligned_le16(&raid_map->row_cnt);
2456
2457 map_index = (first_group *
2458 (get_unaligned_le16(&raid_map->row_cnt) *
2459 total_disks_per_row)) +
2460 (map_row * total_disks_per_row) + first_column;
2461 }
2462
Kevin Barnett6c223762016-06-27 16:41:00 -05002463 aio_handle = raid_map->disk_data[map_index].aio_handle;
2464 disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2465 first_row * strip_size +
2466 (first_row_offset - first_column * strip_size);
2467 disk_block_cnt = block_cnt;
2468
2469 /* Handle differing logical/physical block sizes. */
2470 if (raid_map->phys_blk_shift) {
2471 disk_block <<= raid_map->phys_blk_shift;
2472 disk_block_cnt <<= raid_map->phys_blk_shift;
2473 }
2474
2475 if (unlikely(disk_block_cnt > 0xffff))
2476 return PQI_RAID_BYPASS_INELIGIBLE;
2477
2478 /* Build the new CDB for the physical disk I/O. */
2479 if (disk_block > 0xffffffff) {
2480 cdb[0] = is_write ? WRITE_16 : READ_16;
2481 cdb[1] = 0;
2482 put_unaligned_be64(disk_block, &cdb[2]);
2483 put_unaligned_be32(disk_block_cnt, &cdb[10]);
2484 cdb[14] = 0;
2485 cdb[15] = 0;
2486 cdb_length = 16;
2487 } else {
2488 cdb[0] = is_write ? WRITE_10 : READ_10;
2489 cdb[1] = 0;
2490 put_unaligned_be32((u32)disk_block, &cdb[2]);
2491 cdb[6] = 0;
2492 put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
2493 cdb[9] = 0;
2494 cdb_length = 10;
2495 }
2496
2497 if (get_unaligned_le16(&raid_map->flags) &
2498 RAID_MAP_ENCRYPTION_ENABLED) {
2499 pqi_set_encryption_info(&encryption_info, raid_map,
2500 first_block);
2501 encryption_info_ptr = &encryption_info;
2502 } else {
2503 encryption_info_ptr = NULL;
2504 }
2505
2506 return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
Kevin Barnett376fb882017-05-03 18:54:43 -05002507 cdb, cdb_length, queue_group, encryption_info_ptr, true);
Kevin Barnett6c223762016-06-27 16:41:00 -05002508}
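
For the simple single-row, single-column case, the bypass math above reduces to: row = LBA / blocks_per_row, offset = LBA - row * blocks_per_row, column = offset / strip_size, and disk_block = disk_starting_blk + row * strip_size + (offset - column * strip_size). A worked RAID-0 example with made-up geometry (4 data disks, 128-block strips):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t data_disks_per_row = 4;
	uint16_t strip_size = 128;
	uint64_t disk_starting_blk = 0;
	uint64_t first_block = 1000;	/* starting LBA of the request */

	uint32_t blocks_per_row = data_disks_per_row * strip_size;	/* 512 */
	uint64_t first_row = first_block / blocks_per_row;		/* 1 */
	uint32_t first_row_offset =
		(uint32_t)(first_block - first_row * blocks_per_row);	/* 488 */
	uint32_t first_column = first_row_offset / strip_size;		/* 3 */

	/* 0 + 1 * 128 + (488 - 3 * 128) = 232 on the selected disk. */
	uint64_t disk_block = disk_starting_blk + first_row * strip_size +
		(first_row_offset - first_column * strip_size);

	printf("row=%llu column=%u disk_block=%llu\n",
	       (unsigned long long)first_row, first_column,
	       (unsigned long long)disk_block);
	return 0;
}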
2509
2510#define PQI_STATUS_IDLE 0x0
2511
2512#define PQI_CREATE_ADMIN_QUEUE_PAIR 1
2513#define PQI_DELETE_ADMIN_QUEUE_PAIR 2
2514
2515#define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
2516#define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
2517#define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
2518#define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
2519#define PQI_DEVICE_STATE_ERROR 0x4
2520
2521#define PQI_MODE_READY_TIMEOUT_SECS 30
2522#define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
2523
2524static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
2525{
2526 struct pqi_device_registers __iomem *pqi_registers;
2527 unsigned long timeout;
2528 u64 signature;
2529 u8 status;
2530
2531 pqi_registers = ctrl_info->pqi_registers;
2532 timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
2533
2534 while (1) {
2535 signature = readq(&pqi_registers->signature);
2536 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
2537 sizeof(signature)) == 0)
2538 break;
2539 if (time_after(jiffies, timeout)) {
2540 dev_err(&ctrl_info->pci_dev->dev,
2541 "timed out waiting for PQI signature\n");
2542 return -ETIMEDOUT;
2543 }
2544 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2545 }
2546
2547 while (1) {
2548 status = readb(&pqi_registers->function_and_status_code);
2549 if (status == PQI_STATUS_IDLE)
2550 break;
2551 if (time_after(jiffies, timeout)) {
2552 dev_err(&ctrl_info->pci_dev->dev,
2553 "timed out waiting for PQI IDLE\n");
2554 return -ETIMEDOUT;
2555 }
2556 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2557 }
2558
2559 while (1) {
2560 if (readl(&pqi_registers->device_status) ==
2561 PQI_DEVICE_STATE_ALL_REGISTERS_READY)
2562 break;
2563 if (time_after(jiffies, timeout)) {
2564 dev_err(&ctrl_info->pci_dev->dev,
2565 "timed out waiting for PQI all registers ready\n");
2566 return -ETIMEDOUT;
2567 }
2568 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2569 }
2570
2571 return 0;
2572}
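
pqi_wait_for_pqi_mode_ready() polls three registers with the same pattern: compute a deadline once, loop reading, break on the expected value, error out past the deadline, and sleep between polls. A generic userspace sketch of that loop, where the ready() predicate is a stand-in for the register reads:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define READY_TIMEOUT_SECS	30
#define POLL_INTERVAL_USECS	1000

static bool ready(void)
{
	static int calls;

	return ++calls > 5;		/* pretend the hardware becomes ready */
}

static int wait_until_ready(void)
{
	time_t deadline = time(NULL) + READY_TIMEOUT_SECS;

	while (!ready()) {
		if (time(NULL) > deadline) {
			fprintf(stderr, "timed out waiting for ready\n");
			return -1;
		}
		usleep(POLL_INTERVAL_USECS);
	}
	return 0;
}

int main(void)
{
	return wait_until_ready() ? 1 : 0;
}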
2573
2574static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
2575{
2576 struct pqi_scsi_dev *device;
2577
2578 device = io_request->scmd->device->hostdata;
Kevin Barnett588a63fe2017-05-03 18:55:25 -05002579 device->raid_bypass_enabled = false;
Kevin Barnett376fb882017-05-03 18:54:43 -05002580 device->aio_enabled = false;
Kevin Barnett6c223762016-06-27 16:41:00 -05002581}
2582
Kevin Barnettd87d5472017-05-03 18:54:00 -05002583static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
Kevin Barnett6c223762016-06-27 16:41:00 -05002584{
2585 struct pqi_ctrl_info *ctrl_info;
Kevin Barnette58081a2016-08-31 14:54:29 -05002586 struct pqi_scsi_dev *device;
Kevin Barnett6c223762016-06-27 16:41:00 -05002587
Kevin Barnett03b288cf2017-05-03 18:54:49 -05002588 device = sdev->hostdata;
2589 if (device->device_offline)
2590 return;
2591
2592 device->device_offline = true;
Kevin Barnett03b288cf2017-05-03 18:54:49 -05002593 ctrl_info = shost_to_hba(sdev->host);
2594 pqi_schedule_rescan_worker(ctrl_info);
Dave Carrolla9a68102018-12-07 16:29:37 -06002595 dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n",
Kevin Barnett03b288cf2017-05-03 18:54:49 -05002596 path, ctrl_info->scsi_host->host_no, device->bus,
2597 device->target, device->lun);
Kevin Barnett6c223762016-06-27 16:41:00 -05002598}
2599
2600static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
2601{
2602 u8 scsi_status;
2603 u8 host_byte;
2604 struct scsi_cmnd *scmd;
2605 struct pqi_raid_error_info *error_info;
2606 size_t sense_data_length;
2607 int residual_count;
2608 int xfer_count;
2609 struct scsi_sense_hdr sshdr;
2610
2611 scmd = io_request->scmd;
2612 if (!scmd)
2613 return;
2614
2615 error_info = io_request->error_info;
2616 scsi_status = error_info->status;
2617 host_byte = DID_OK;
2618
Kevin Barnettf5b63202017-05-03 18:55:07 -05002619 switch (error_info->data_out_result) {
2620 case PQI_DATA_IN_OUT_GOOD:
2621 break;
2622 case PQI_DATA_IN_OUT_UNDERFLOW:
Kevin Barnett6c223762016-06-27 16:41:00 -05002623 xfer_count =
2624 get_unaligned_le32(&error_info->data_out_transferred);
2625 residual_count = scsi_bufflen(scmd) - xfer_count;
2626 scsi_set_resid(scmd, residual_count);
2627 if (xfer_count < scmd->underflow)
2628 host_byte = DID_SOFT_ERROR;
Kevin Barnettf5b63202017-05-03 18:55:07 -05002629 break;
2630 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
2631 case PQI_DATA_IN_OUT_ABORTED:
2632 host_byte = DID_ABORT;
2633 break;
2634 case PQI_DATA_IN_OUT_TIMEOUT:
2635 host_byte = DID_TIME_OUT;
2636 break;
2637 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
2638 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
2639 case PQI_DATA_IN_OUT_BUFFER_ERROR:
2640 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
2641 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
2642 case PQI_DATA_IN_OUT_ERROR:
2643 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
2644 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
2645 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
2646 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
2647 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
2648 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
2649 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
2650 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
2651 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
2652 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
2653 default:
2654 host_byte = DID_ERROR;
2655 break;
Kevin Barnett6c223762016-06-27 16:41:00 -05002656 }
2657
2658 sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
2659 if (sense_data_length == 0)
2660 sense_data_length =
2661 get_unaligned_le16(&error_info->response_data_length);
2662 if (sense_data_length) {
2663 if (sense_data_length > sizeof(error_info->data))
2664 sense_data_length = sizeof(error_info->data);
2665
2666 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
2667 scsi_normalize_sense(error_info->data,
2668 sense_data_length, &sshdr) &&
2669 sshdr.sense_key == HARDWARE_ERROR &&
2670 sshdr.asc == 0x3e &&
2671 sshdr.ascq == 0x1) {
Kevin Barnettd87d5472017-05-03 18:54:00 -05002672 pqi_take_device_offline(scmd->device, "RAID");
Kevin Barnett6c223762016-06-27 16:41:00 -05002673 host_byte = DID_NO_CONNECT;
2674 }
2675
2676 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2677 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2678 memcpy(scmd->sense_buffer, error_info->data,
2679 sense_data_length);
2680 }
2681
2682 scmd->result = scsi_status;
2683 set_host_byte(scmd, host_byte);
2684}
2685
2686static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
2687{
2688 u8 scsi_status;
2689 u8 host_byte;
2690 struct scsi_cmnd *scmd;
2691 struct pqi_aio_error_info *error_info;
2692 size_t sense_data_length;
2693 int residual_count;
2694 int xfer_count;
2695 bool device_offline;
2696
2697 scmd = io_request->scmd;
2698 error_info = io_request->error_info;
2699 host_byte = DID_OK;
2700 sense_data_length = 0;
2701 device_offline = false;
2702
2703 switch (error_info->service_response) {
2704 case PQI_AIO_SERV_RESPONSE_COMPLETE:
2705 scsi_status = error_info->status;
2706 break;
2707 case PQI_AIO_SERV_RESPONSE_FAILURE:
2708 switch (error_info->status) {
2709 case PQI_AIO_STATUS_IO_ABORTED:
2710 scsi_status = SAM_STAT_TASK_ABORTED;
2711 break;
2712 case PQI_AIO_STATUS_UNDERRUN:
2713 scsi_status = SAM_STAT_GOOD;
2714 residual_count = get_unaligned_le32(
2715 &error_info->residual_count);
2716 scsi_set_resid(scmd, residual_count);
2717 xfer_count = scsi_bufflen(scmd) - residual_count;
2718 if (xfer_count < scmd->underflow)
2719 host_byte = DID_SOFT_ERROR;
2720 break;
2721 case PQI_AIO_STATUS_OVERRUN:
2722 scsi_status = SAM_STAT_GOOD;
2723 break;
2724 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
2725 pqi_aio_path_disabled(io_request);
2726 scsi_status = SAM_STAT_GOOD;
2727 io_request->status = -EAGAIN;
2728 break;
2729 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
2730 case PQI_AIO_STATUS_INVALID_DEVICE:
Kevin Barnett376fb882017-05-03 18:54:43 -05002731 if (!io_request->raid_bypass) {
2732 device_offline = true;
2733 pqi_take_device_offline(scmd->device, "AIO");
2734 host_byte = DID_NO_CONNECT;
2735 }
Kevin Barnett6c223762016-06-27 16:41:00 -05002736 scsi_status = SAM_STAT_CHECK_CONDITION;
2737 break;
2738 case PQI_AIO_STATUS_IO_ERROR:
2739 default:
2740 scsi_status = SAM_STAT_CHECK_CONDITION;
2741 break;
2742 }
2743 break;
2744 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
2745 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
2746 scsi_status = SAM_STAT_GOOD;
2747 break;
2748 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
2749 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
2750 default:
2751 scsi_status = SAM_STAT_CHECK_CONDITION;
2752 break;
2753 }
2754
2755 if (error_info->data_present) {
2756 sense_data_length =
2757 get_unaligned_le16(&error_info->data_length);
2758 if (sense_data_length) {
2759 if (sense_data_length > sizeof(error_info->data))
2760 sense_data_length = sizeof(error_info->data);
2761 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2762 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2763 memcpy(scmd->sense_buffer, error_info->data,
2764 sense_data_length);
2765 }
2766 }
2767
2768 if (device_offline && sense_data_length == 0)
2769 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
2770 0x3e, 0x1);
2771
2772 scmd->result = scsi_status;
2773 set_host_byte(scmd, host_byte);
2774}
2775
2776static void pqi_process_io_error(unsigned int iu_type,
2777 struct pqi_io_request *io_request)
2778{
2779 switch (iu_type) {
2780 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2781 pqi_process_raid_io_error(io_request);
2782 break;
2783 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2784 pqi_process_aio_io_error(io_request);
2785 break;
2786 }
2787}
2788
2789static int pqi_interpret_task_management_response(
2790 struct pqi_task_management_response *response)
2791{
2792 int rc;
2793
2794 switch (response->response_code) {
Kevin Barnettb17f0482016-08-31 14:54:17 -05002795 case SOP_TMF_COMPLETE:
2796 case SOP_TMF_FUNCTION_SUCCEEDED:
Kevin Barnett6c223762016-06-27 16:41:00 -05002797 rc = 0;
2798 break;
Mahesh Rajashekhara34063842018-12-07 16:28:16 -06002799 case SOP_TMF_REJECTED:
2800 rc = -EAGAIN;
2801 break;
Kevin Barnett6c223762016-06-27 16:41:00 -05002802 default:
2803 rc = -EIO;
2804 break;
2805 }
2806
2807 return rc;
2808}
2809
2810static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
2811 struct pqi_queue_group *queue_group)
2812{
2813 unsigned int num_responses;
2814 pqi_index_t oq_pi;
2815 pqi_index_t oq_ci;
2816 struct pqi_io_request *io_request;
2817 struct pqi_io_response *response;
2818 u16 request_id;
2819
2820 num_responses = 0;
2821 oq_ci = queue_group->oq_ci_copy;
2822
2823 while (1) {
Kevin Barnettdac12fb2018-06-18 13:23:00 -05002824 oq_pi = readl(queue_group->oq_pi);
Kevin Barnett6c223762016-06-27 16:41:00 -05002825 if (oq_pi == oq_ci)
2826 break;
2827
2828 num_responses++;
2829 response = queue_group->oq_element_array +
2830 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
2831
2832 request_id = get_unaligned_le16(&response->request_id);
2833 WARN_ON(request_id >= ctrl_info->max_io_slots);
2834
2835 io_request = &ctrl_info->io_request_pool[request_id];
2836 WARN_ON(atomic_read(&io_request->refcount) == 0);
2837
2838 switch (response->header.iu_type) {
2839 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
2840 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
2841 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
2842 break;
Kevin Barnettb212c252018-12-07 16:28:10 -06002843 case PQI_RESPONSE_IU_VENDOR_GENERAL:
2844 io_request->status =
2845 get_unaligned_le16(
2846 &((struct pqi_vendor_general_response *)
2847 response)->status);
2848 break;
Kevin Barnett6c223762016-06-27 16:41:00 -05002849 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
2850 io_request->status =
2851 pqi_interpret_task_management_response(
2852 (void *)response);
2853 break;
2854 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
2855 pqi_aio_path_disabled(io_request);
2856 io_request->status = -EAGAIN;
2857 break;
2858 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2859 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2860 io_request->error_info = ctrl_info->error_buffer +
2861 (get_unaligned_le16(&response->error_index) *
2862 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
2863 pqi_process_io_error(response->header.iu_type,
2864 io_request);
2865 break;
2866 default:
2867 dev_err(&ctrl_info->pci_dev->dev,
2868 "unexpected IU type: 0x%x\n",
2869 response->header.iu_type);
Kevin Barnett6c223762016-06-27 16:41:00 -05002870 break;
2871 }
2872
2873 io_request->io_complete_callback(io_request,
2874 io_request->context);
2875
2876 /*
2877 * Note that the I/O request structure CANNOT BE TOUCHED after
2878 * returning from the I/O completion callback!
2879 */
2880
2881 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
2882 }
2883
2884 if (num_responses) {
2885 queue_group->oq_ci_copy = oq_ci;
2886 writel(oq_ci, queue_group->oq_ci);
2887 }
2888
2889 return num_responses;
2890}
2891
2892static inline unsigned int pqi_num_elements_free(unsigned int pi,
Kevin Barnettdf7a1fc2016-08-31 14:54:59 -05002893 unsigned int ci, unsigned int elements_in_queue)
Kevin Barnett6c223762016-06-27 16:41:00 -05002894{
2895 unsigned int num_elements_used;
2896
2897 if (pi >= ci)
2898 num_elements_used = pi - ci;
2899 else
2900 num_elements_used = elements_in_queue - ci + pi;
2901
2902 return elements_in_queue - num_elements_used - 1;
2903}
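
pqi_num_elements_free() is standard circular-queue arithmetic: the used count is pi - ci with wraparound, and one slot is always kept empty so a full queue can be distinguished from an empty one. A tiny standalone check of the same formula:

#include <stdio.h>

static unsigned int num_elements_free(unsigned int pi, unsigned int ci,
				      unsigned int elements_in_queue)
{
	unsigned int used = (pi >= ci) ? pi - ci : elements_in_queue - ci + pi;

	return elements_in_queue - used - 1;
}

int main(void)
{
	/* 16-slot queue: empty gives 15 free; wrapped (pi < ci) gives 3 free. */
	printf("%u %u\n", num_elements_free(5, 5, 16),
	       num_elements_free(2, 6, 16));
	return 0;
}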
2904
Kevin Barnett98f87662017-05-03 18:53:11 -05002905static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
Kevin Barnett6c223762016-06-27 16:41:00 -05002906 struct pqi_event_acknowledge_request *iu, size_t iu_length)
2907{
2908 pqi_index_t iq_pi;
2909 pqi_index_t iq_ci;
2910 unsigned long flags;
2911 void *next_element;
Kevin Barnett6c223762016-06-27 16:41:00 -05002912 struct pqi_queue_group *queue_group;
2913
2914 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
2915 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
2916
Kevin Barnett6c223762016-06-27 16:41:00 -05002917 while (1) {
2918 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
2919
2920 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
Kevin Barnettdac12fb2018-06-18 13:23:00 -05002921 iq_ci = readl(queue_group->iq_ci[RAID_PATH]);
Kevin Barnett6c223762016-06-27 16:41:00 -05002922
2923 if (pqi_num_elements_free(iq_pi, iq_ci,
2924 ctrl_info->num_elements_per_iq))
2925 break;
2926
2927 spin_unlock_irqrestore(
2928 &queue_group->submit_lock[RAID_PATH], flags);
2929
Kevin Barnett98f87662017-05-03 18:53:11 -05002930 if (pqi_ctrl_offline(ctrl_info))
Kevin Barnett6c223762016-06-27 16:41:00 -05002931 return;
Kevin Barnett6c223762016-06-27 16:41:00 -05002932 }
2933
2934 next_element = queue_group->iq_element_array[RAID_PATH] +
2935 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
2936
2937 memcpy(next_element, iu, iu_length);
2938
2939 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
Kevin Barnett6c223762016-06-27 16:41:00 -05002940 queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
2941
2942 /*
2943 * This write notifies the controller that an IU is available to be
2944 * processed.
2945 */
2946 writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
2947
2948 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
Kevin Barnett6c223762016-06-27 16:41:00 -05002949}
2950
2951static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
2952 struct pqi_event *event)
2953{
2954 struct pqi_event_acknowledge_request request;
2955
2956 memset(&request, 0, sizeof(request));
2957
2958 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
2959 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
2960 &request.header.iu_length);
2961 request.event_type = event->event_type;
2962 request.event_id = event->event_id;
2963 request.additional_event_id = event->additional_event_id;
2964
Kevin Barnett98f87662017-05-03 18:53:11 -05002965 pqi_send_event_ack(ctrl_info, &request, sizeof(request));
Kevin Barnett6c223762016-06-27 16:41:00 -05002966}
2967
2968static void pqi_event_worker(struct work_struct *work)
2969{
2970 unsigned int i;
2971 struct pqi_ctrl_info *ctrl_info;
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002972 struct pqi_event *event;
Kevin Barnett6c223762016-06-27 16:41:00 -05002973
2974 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
2975
Kevin Barnett7561a7e2017-05-03 18:52:58 -05002976 pqi_ctrl_busy(ctrl_info);
2977 pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT);
Kevin Barnett5f310422017-05-03 18:54:55 -05002978 if (pqi_ctrl_offline(ctrl_info))
2979 goto out;
2980
2981 pqi_schedule_rescan_worker_delayed(ctrl_info);
Kevin Barnett7561a7e2017-05-03 18:52:58 -05002982
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002983 event = ctrl_info->events;
Kevin Barnett6c223762016-06-27 16:41:00 -05002984 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002985 if (event->pending) {
2986 event->pending = false;
2987 pqi_acknowledge_event(ctrl_info, event);
Kevin Barnett6c223762016-06-27 16:41:00 -05002988 }
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002989 event++;
Kevin Barnett6c223762016-06-27 16:41:00 -05002990 }
2991
Kevin Barnett5f310422017-05-03 18:54:55 -05002992out:
Kevin Barnett7561a7e2017-05-03 18:52:58 -05002993 pqi_ctrl_unbusy(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05002994}
2995
Kevin Barnett98f87662017-05-03 18:53:11 -05002996#define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ)
Kevin Barnett6c223762016-06-27 16:41:00 -05002997
Kees Cook74a0f572017-10-11 16:27:10 -07002998static void pqi_heartbeat_timer_handler(struct timer_list *t)
Kevin Barnett6c223762016-06-27 16:41:00 -05002999{
3000 int num_interrupts;
Kevin Barnett98f87662017-05-03 18:53:11 -05003001 u32 heartbeat_count;
Kees Cook74a0f572017-10-11 16:27:10 -07003002 struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t,
3003 heartbeat_timer);
Kevin Barnett6c223762016-06-27 16:41:00 -05003004
Kevin Barnett98f87662017-05-03 18:53:11 -05003005 pqi_check_ctrl_health(ctrl_info);
3006 if (pqi_ctrl_offline(ctrl_info))
Kevin Barnett061ef062017-05-03 18:53:05 -05003007 return;
3008
Kevin Barnett6c223762016-06-27 16:41:00 -05003009 num_interrupts = atomic_read(&ctrl_info->num_interrupts);
Kevin Barnett98f87662017-05-03 18:53:11 -05003010 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05003011
3012 if (num_interrupts == ctrl_info->previous_num_interrupts) {
Kevin Barnett98f87662017-05-03 18:53:11 -05003013 if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
3014 dev_err(&ctrl_info->pci_dev->dev,
3015 "no heartbeat detected - last heartbeat count: %u\n",
3016 heartbeat_count);
Kevin Barnett6c223762016-06-27 16:41:00 -05003017 pqi_take_ctrl_offline(ctrl_info);
3018 return;
3019 }
Kevin Barnett6c223762016-06-27 16:41:00 -05003020 } else {
Kevin Barnett98f87662017-05-03 18:53:11 -05003021 ctrl_info->previous_num_interrupts = num_interrupts;
Kevin Barnett6c223762016-06-27 16:41:00 -05003022 }
3023
Kevin Barnett98f87662017-05-03 18:53:11 -05003024 ctrl_info->previous_heartbeat_count = heartbeat_count;
Kevin Barnett6c223762016-06-27 16:41:00 -05003025 mod_timer(&ctrl_info->heartbeat_timer,
3026 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
3027}
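
The heartbeat handler only takes the controller offline when both counters have stalled: the driver has seen no new interrupts since the last timer tick and the firmware heartbeat counter has not advanced either. A compact sketch of that decision (state updates simplified relative to the driver):

#include <stdbool.h>
#include <stdio.h>

struct hb_state {
	unsigned int prev_interrupts;
	unsigned int prev_heartbeat;
};

/* Returns true when the controller should be taken offline. */
static bool heartbeat_stalled(struct hb_state *s, unsigned int interrupts,
			      unsigned int heartbeat)
{
	bool stalled = (interrupts == s->prev_interrupts) &&
		       (heartbeat == s->prev_heartbeat);

	s->prev_interrupts = interrupts;
	s->prev_heartbeat = heartbeat;
	return stalled;
}

int main(void)
{
	struct hb_state s = { .prev_interrupts = 10, .prev_heartbeat = 100 };

	printf("%d\n", heartbeat_stalled(&s, 10, 101));	/* 0: heartbeat moved */
	printf("%d\n", heartbeat_stalled(&s, 10, 101));	/* 1: both stalled */
	return 0;
}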
3028
3029static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3030{
Kevin Barnett98f87662017-05-03 18:53:11 -05003031 if (!ctrl_info->heartbeat_counter)
3032 return;
3033
Kevin Barnett6c223762016-06-27 16:41:00 -05003034 ctrl_info->previous_num_interrupts =
3035 atomic_read(&ctrl_info->num_interrupts);
Kevin Barnett98f87662017-05-03 18:53:11 -05003036 ctrl_info->previous_heartbeat_count =
3037 pqi_read_heartbeat_counter(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05003038
Kevin Barnett6c223762016-06-27 16:41:00 -05003039 ctrl_info->heartbeat_timer.expires =
3040 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
Kevin Barnett061ef062017-05-03 18:53:05 -05003041 add_timer(&ctrl_info->heartbeat_timer);
Kevin Barnett6c223762016-06-27 16:41:00 -05003042}
3043
3044static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3045{
Kevin Barnett98f87662017-05-03 18:53:11 -05003046 del_timer_sync(&ctrl_info->heartbeat_timer);
Kevin Barnett6c223762016-06-27 16:41:00 -05003047}
3048
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05003049static inline int pqi_event_type_to_event_index(unsigned int event_type)
Kevin Barnett6c223762016-06-27 16:41:00 -05003050{
3051 int index;
3052
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05003053 for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
3054 if (event_type == pqi_supported_event_types[index])
3055 return index;
Kevin Barnett6c223762016-06-27 16:41:00 -05003056
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05003057 return -1;
3058}
3059
3060static inline bool pqi_is_supported_event(unsigned int event_type)
3061{
3062 return pqi_event_type_to_event_index(event_type) != -1;
Kevin Barnett6c223762016-06-27 16:41:00 -05003063}
3064
3065static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
3066{
3067 unsigned int num_events;
3068 pqi_index_t oq_pi;
3069 pqi_index_t oq_ci;
3070 struct pqi_event_queue *event_queue;
3071 struct pqi_event_response *response;
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05003072 struct pqi_event *event;
Kevin Barnett6c223762016-06-27 16:41:00 -05003073 int event_index;
3074
3075 event_queue = &ctrl_info->event_queue;
3076 num_events = 0;
Kevin Barnett6c223762016-06-27 16:41:00 -05003077 oq_ci = event_queue->oq_ci_copy;
3078
3079 while (1) {
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003080 oq_pi = readl(event_queue->oq_pi);
Kevin Barnett6c223762016-06-27 16:41:00 -05003081 if (oq_pi == oq_ci)
3082 break;
3083
3084 num_events++;
3085 response = event_queue->oq_element_array +
3086 (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
3087
3088 event_index =
3089 pqi_event_type_to_event_index(response->event_type);
3090
3091 if (event_index >= 0) {
3092 if (response->request_acknowlege) {
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05003093 event = &ctrl_info->events[event_index];
3094 event->pending = true;
3095 event->event_type = response->event_type;
3096 event->event_id = response->event_id;
3097 event->additional_event_id =
Kevin Barnett6c223762016-06-27 16:41:00 -05003098 response->additional_event_id;
Kevin Barnett6c223762016-06-27 16:41:00 -05003099 }
3100 }
3101
3102 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
3103 }
3104
3105 if (num_events) {
3106 event_queue->oq_ci_copy = oq_ci;
3107 writel(oq_ci, event_queue->oq_ci);
Kevin Barnett98f87662017-05-03 18:53:11 -05003108 schedule_work(&ctrl_info->event_work);
Kevin Barnett6c223762016-06-27 16:41:00 -05003109 }
3110
3111 return num_events;
3112}
3113
Kevin Barnett061ef062017-05-03 18:53:05 -05003114#define PQI_LEGACY_INTX_MASK 0x1
3115
3116static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info,
3117 bool enable_intx)
3118{
3119 u32 intx_mask;
3120 struct pqi_device_registers __iomem *pqi_registers;
3121 volatile void __iomem *register_addr;
3122
3123 pqi_registers = ctrl_info->pqi_registers;
3124
3125 if (enable_intx)
3126 register_addr = &pqi_registers->legacy_intx_mask_clear;
3127 else
3128 register_addr = &pqi_registers->legacy_intx_mask_set;
3129
3130 intx_mask = readl(register_addr);
3131 intx_mask |= PQI_LEGACY_INTX_MASK;
3132 writel(intx_mask, register_addr);
3133}
3134
3135static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
3136 enum pqi_irq_mode new_mode)
3137{
3138 switch (ctrl_info->irq_mode) {
3139 case IRQ_MODE_MSIX:
3140 switch (new_mode) {
3141 case IRQ_MODE_MSIX:
3142 break;
3143 case IRQ_MODE_INTX:
3144 pqi_configure_legacy_intx(ctrl_info, true);
Kevin Barnett061ef062017-05-03 18:53:05 -05003145 sis_enable_intx(ctrl_info);
3146 break;
3147 case IRQ_MODE_NONE:
Kevin Barnett061ef062017-05-03 18:53:05 -05003148 break;
3149 }
3150 break;
3151 case IRQ_MODE_INTX:
3152 switch (new_mode) {
3153 case IRQ_MODE_MSIX:
3154 pqi_configure_legacy_intx(ctrl_info, false);
Kevin Barnett061ef062017-05-03 18:53:05 -05003155 sis_enable_msix(ctrl_info);
3156 break;
3157 case IRQ_MODE_INTX:
3158 break;
3159 case IRQ_MODE_NONE:
3160 pqi_configure_legacy_intx(ctrl_info, false);
Kevin Barnett061ef062017-05-03 18:53:05 -05003161 break;
3162 }
3163 break;
3164 case IRQ_MODE_NONE:
3165 switch (new_mode) {
3166 case IRQ_MODE_MSIX:
3167 sis_enable_msix(ctrl_info);
3168 break;
3169 case IRQ_MODE_INTX:
3170 pqi_configure_legacy_intx(ctrl_info, true);
3171 sis_enable_intx(ctrl_info);
3172 break;
3173 case IRQ_MODE_NONE:
3174 break;
3175 }
3176 break;
3177 }
3178
3179 ctrl_info->irq_mode = new_mode;
3180}
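
pqi_change_irq_mode() is a small state machine over {MSI-X, INTx, NONE}: each (current, new) pair decides whether to mask or unmask the legacy INTx line and which mode to enable in SIS before recording the new mode. A standalone sketch that just prints the actions the driver would take for each transition (sis_enable_msix()/sis_enable_intx() are the driver's helpers, referenced here only by name):

#include <stdio.h>

enum irq_mode { IRQ_NONE_MODE, IRQ_INTX, IRQ_MSIX };

static void change_irq_mode(enum irq_mode *cur, enum irq_mode new_mode)
{
	switch (*cur) {
	case IRQ_MSIX:
		if (new_mode == IRQ_INTX)
			printf("unmask legacy INTx, sis_enable_intx()\n");
		break;
	case IRQ_INTX:
		if (new_mode == IRQ_MSIX)
			printf("mask legacy INTx, sis_enable_msix()\n");
		else if (new_mode == IRQ_NONE_MODE)
			printf("mask legacy INTx\n");
		break;
	case IRQ_NONE_MODE:
		if (new_mode == IRQ_MSIX)
			printf("sis_enable_msix()\n");
		else if (new_mode == IRQ_INTX)
			printf("unmask legacy INTx, sis_enable_intx()\n");
		break;
	}
	*cur = new_mode;
}

int main(void)
{
	enum irq_mode mode = IRQ_MSIX;

	change_irq_mode(&mode, IRQ_INTX);
	change_irq_mode(&mode, IRQ_NONE_MODE);
	return 0;
}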
3181
3182#define PQI_LEGACY_INTX_PENDING 0x1
3183
3184static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
3185{
3186 bool valid_irq;
3187 u32 intx_status;
3188
3189 switch (ctrl_info->irq_mode) {
3190 case IRQ_MODE_MSIX:
3191 valid_irq = true;
3192 break;
3193 case IRQ_MODE_INTX:
3194 intx_status =
3195 readl(&ctrl_info->pqi_registers->legacy_intx_status);
3196 if (intx_status & PQI_LEGACY_INTX_PENDING)
3197 valid_irq = true;
3198 else
3199 valid_irq = false;
3200 break;
3201 case IRQ_MODE_NONE:
3202 default:
3203 valid_irq = false;
3204 break;
3205 }
3206
3207 return valid_irq;
3208}
3209
Kevin Barnett6c223762016-06-27 16:41:00 -05003210static irqreturn_t pqi_irq_handler(int irq, void *data)
3211{
3212 struct pqi_ctrl_info *ctrl_info;
3213 struct pqi_queue_group *queue_group;
3214 unsigned int num_responses_handled;
3215
3216 queue_group = data;
3217 ctrl_info = queue_group->ctrl_info;
3218
Kevin Barnett061ef062017-05-03 18:53:05 -05003219 if (!pqi_is_valid_irq(ctrl_info))
Kevin Barnett6c223762016-06-27 16:41:00 -05003220 return IRQ_NONE;
3221
3222 num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
3223
3224 if (irq == ctrl_info->event_irq)
3225 num_responses_handled += pqi_process_event_intr(ctrl_info);
3226
3227 if (num_responses_handled)
3228 atomic_inc(&ctrl_info->num_interrupts);
3229
3230 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
3231 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
3232
3233 return IRQ_HANDLED;
3234}
3235
3236static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
3237{
Kevin Barnettd91d7822017-05-03 18:53:30 -05003238 struct pci_dev *pci_dev = ctrl_info->pci_dev;
Kevin Barnett6c223762016-06-27 16:41:00 -05003239 int i;
3240 int rc;
3241
Kevin Barnettd91d7822017-05-03 18:53:30 -05003242 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);
Kevin Barnett6c223762016-06-27 16:41:00 -05003243
3244 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
Kevin Barnettd91d7822017-05-03 18:53:30 -05003245 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
Christoph Hellwig52198222016-11-01 08:12:49 -06003246 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
Kevin Barnett6c223762016-06-27 16:41:00 -05003247 if (rc) {
Kevin Barnettd91d7822017-05-03 18:53:30 -05003248 dev_err(&pci_dev->dev,
Kevin Barnett6c223762016-06-27 16:41:00 -05003249 "irq %u init failed with error %d\n",
Kevin Barnettd91d7822017-05-03 18:53:30 -05003250 pci_irq_vector(pci_dev, i), rc);
Kevin Barnett6c223762016-06-27 16:41:00 -05003251 return rc;
3252 }
3253 ctrl_info->num_msix_vectors_initialized++;
3254 }
3255
3256 return 0;
3257}
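/*
 * num_msix_vectors_initialized tracks how many vectors have a handler
 * attached so far; if request_irq() fails partway through, the caller is
 * expected to unwind with pqi_free_irqs(), which frees exactly that many.
 */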
3258
Kevin Barnett98bf0612017-05-03 18:52:28 -05003259static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
3260{
3261 int i;
3262
3263 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
3264 free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
3265 &ctrl_info->queue_groups[i]);
3266
3267 ctrl_info->num_msix_vectors_initialized = 0;
3268}
3269
Kevin Barnett6c223762016-06-27 16:41:00 -05003270static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3271{
Kevin Barnett98bf0612017-05-03 18:52:28 -05003272 int num_vectors_enabled;
Kevin Barnett6c223762016-06-27 16:41:00 -05003273
Kevin Barnett98bf0612017-05-03 18:52:28 -05003274 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
Christoph Hellwig52198222016-11-01 08:12:49 -06003275 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
3276 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
Kevin Barnett98bf0612017-05-03 18:52:28 -05003277 if (num_vectors_enabled < 0) {
Kevin Barnett6c223762016-06-27 16:41:00 -05003278 dev_err(&ctrl_info->pci_dev->dev,
Kevin Barnett98bf0612017-05-03 18:52:28 -05003279 "MSI-X init failed with error %d\n",
3280 num_vectors_enabled);
3281 return num_vectors_enabled;
Kevin Barnett6c223762016-06-27 16:41:00 -05003282 }
3283
Kevin Barnett98bf0612017-05-03 18:52:28 -05003284 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
Kevin Barnett061ef062017-05-03 18:53:05 -05003285 ctrl_info->irq_mode = IRQ_MODE_MSIX;
Kevin Barnett6c223762016-06-27 16:41:00 -05003286 return 0;
3287}
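/*
 * PCI_IRQ_AFFINITY asks the PCI core to spread the allocated MSI-X vectors
 * across the online CPUs, which is what lets each queue group map onto its
 * own vector (and, through the blk-mq PCI helpers, onto a set of CPUs).
 */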
3288
Kevin Barnett98bf0612017-05-03 18:52:28 -05003289static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3290{
3291 if (ctrl_info->num_msix_vectors_enabled) {
3292 pci_free_irq_vectors(ctrl_info->pci_dev);
3293 ctrl_info->num_msix_vectors_enabled = 0;
3294 }
3295}
3296
Kevin Barnett6c223762016-06-27 16:41:00 -05003297static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
3298{
3299 unsigned int i;
3300 size_t alloc_length;
3301 size_t element_array_length_per_iq;
3302 size_t element_array_length_per_oq;
3303 void *element_array;
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003304 void __iomem *next_queue_index;
Kevin Barnett6c223762016-06-27 16:41:00 -05003305 void *aligned_pointer;
3306 unsigned int num_inbound_queues;
3307 unsigned int num_outbound_queues;
3308 unsigned int num_queue_indexes;
3309 struct pqi_queue_group *queue_group;
3310
3311 element_array_length_per_iq =
3312 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
3313 ctrl_info->num_elements_per_iq;
3314 element_array_length_per_oq =
3315 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
3316 ctrl_info->num_elements_per_oq;
3317 num_inbound_queues = ctrl_info->num_queue_groups * 2;
3318 num_outbound_queues = ctrl_info->num_queue_groups;
3319 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
3320
3321 aligned_pointer = NULL;
3322
3323 for (i = 0; i < num_inbound_queues; i++) {
3324 aligned_pointer = PTR_ALIGN(aligned_pointer,
3325 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3326 aligned_pointer += element_array_length_per_iq;
3327 }
3328
3329 for (i = 0; i < num_outbound_queues; i++) {
3330 aligned_pointer = PTR_ALIGN(aligned_pointer,
3331 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3332 aligned_pointer += element_array_length_per_oq;
3333 }
3334
3335 aligned_pointer = PTR_ALIGN(aligned_pointer,
3336 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3337 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3338 PQI_EVENT_OQ_ELEMENT_LENGTH;
3339
3340 for (i = 0; i < num_queue_indexes; i++) {
3341 aligned_pointer = PTR_ALIGN(aligned_pointer,
3342 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3343 aligned_pointer += sizeof(pqi_index_t);
3344 }
3345
3346 alloc_length = (size_t)aligned_pointer +
3347 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3348
Kevin Barnette1d213b2017-05-03 18:53:18 -05003349 alloc_length += PQI_EXTRA_SGL_MEMORY;
3350
Kevin Barnett6c223762016-06-27 16:41:00 -05003351 ctrl_info->queue_memory_base =
3352 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3353 alloc_length,
3354 &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);
3355
Kevin Barnettd87d5472017-05-03 18:54:00 -05003356 if (!ctrl_info->queue_memory_base)
Kevin Barnett6c223762016-06-27 16:41:00 -05003357 return -ENOMEM;
Kevin Barnett6c223762016-06-27 16:41:00 -05003358
3359 ctrl_info->queue_memory_length = alloc_length;
3360
3361 element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
3362 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3363
3364 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3365 queue_group = &ctrl_info->queue_groups[i];
3366 queue_group->iq_element_array[RAID_PATH] = element_array;
3367 queue_group->iq_element_array_bus_addr[RAID_PATH] =
3368 ctrl_info->queue_memory_base_dma_handle +
3369 (element_array - ctrl_info->queue_memory_base);
3370 element_array += element_array_length_per_iq;
3371 element_array = PTR_ALIGN(element_array,
3372 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3373 queue_group->iq_element_array[AIO_PATH] = element_array;
3374 queue_group->iq_element_array_bus_addr[AIO_PATH] =
3375 ctrl_info->queue_memory_base_dma_handle +
3376 (element_array - ctrl_info->queue_memory_base);
3377 element_array += element_array_length_per_iq;
3378 element_array = PTR_ALIGN(element_array,
3379 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3380 }
3381
3382 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3383 queue_group = &ctrl_info->queue_groups[i];
3384 queue_group->oq_element_array = element_array;
3385 queue_group->oq_element_array_bus_addr =
3386 ctrl_info->queue_memory_base_dma_handle +
3387 (element_array - ctrl_info->queue_memory_base);
3388 element_array += element_array_length_per_oq;
3389 element_array = PTR_ALIGN(element_array,
3390 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3391 }
3392
3393 ctrl_info->event_queue.oq_element_array = element_array;
3394 ctrl_info->event_queue.oq_element_array_bus_addr =
3395 ctrl_info->queue_memory_base_dma_handle +
3396 (element_array - ctrl_info->queue_memory_base);
3397 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3398 PQI_EVENT_OQ_ELEMENT_LENGTH;
3399
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003400 next_queue_index = (void __iomem *)PTR_ALIGN(element_array,
Kevin Barnett6c223762016-06-27 16:41:00 -05003401 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3402
3403 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3404 queue_group = &ctrl_info->queue_groups[i];
3405 queue_group->iq_ci[RAID_PATH] = next_queue_index;
3406 queue_group->iq_ci_bus_addr[RAID_PATH] =
3407 ctrl_info->queue_memory_base_dma_handle +
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003408 (next_queue_index -
3409 (void __iomem *)ctrl_info->queue_memory_base);
Kevin Barnett6c223762016-06-27 16:41:00 -05003410 next_queue_index += sizeof(pqi_index_t);
3411 next_queue_index = PTR_ALIGN(next_queue_index,
3412 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3413 queue_group->iq_ci[AIO_PATH] = next_queue_index;
3414 queue_group->iq_ci_bus_addr[AIO_PATH] =
3415 ctrl_info->queue_memory_base_dma_handle +
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003416 (next_queue_index -
3417 (void __iomem *)ctrl_info->queue_memory_base);
Kevin Barnett6c223762016-06-27 16:41:00 -05003418 next_queue_index += sizeof(pqi_index_t);
3419 next_queue_index = PTR_ALIGN(next_queue_index,
3420 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3421 queue_group->oq_pi = next_queue_index;
3422 queue_group->oq_pi_bus_addr =
3423 ctrl_info->queue_memory_base_dma_handle +
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003424 (next_queue_index -
3425 (void __iomem *)ctrl_info->queue_memory_base);
Kevin Barnett6c223762016-06-27 16:41:00 -05003426 next_queue_index += sizeof(pqi_index_t);
3427 next_queue_index = PTR_ALIGN(next_queue_index,
3428 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3429 }
3430
3431 ctrl_info->event_queue.oq_pi = next_queue_index;
3432 ctrl_info->event_queue.oq_pi_bus_addr =
3433 ctrl_info->queue_memory_base_dma_handle +
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003434 (next_queue_index -
3435 (void __iomem *)ctrl_info->queue_memory_base);
Kevin Barnett6c223762016-06-27 16:41:00 -05003436
3437 return 0;
3438}
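/*
 * The function above makes two passes over the same layout: a "dry run"
 * starting from a NULL pointer that only accumulates aligned sizes to
 * compute alloc_length, and a second pass over the real coherent buffer
 * that hands out the element arrays and queue indexes at the same aligned
 * offsets.  A minimal sketch of that pattern follows (illustrative only,
 * not part of the driver; the sizes are made up):
 */
#if 0
static size_t example_two_pass_sizing(void)
{
	void *p = NULL;

	p = PTR_ALIGN(p, 64);		/* first array, 64-byte aligned */
	p += 100;
	p = PTR_ALIGN(p, 64);		/* second array */
	p += 200;

	/* worst-case slack so the real base pointer can also be aligned */
	return (size_t)p + 64;
}
#endif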
3439
3440static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
3441{
3442 unsigned int i;
3443 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3444 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3445
3446 /*
3447 * Initialize the backpointers to the controller structure in
3448 * each operational queue group structure.
3449 */
3450 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3451 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
3452
3453 /*
3454 * Assign IDs to all operational queues. Note that the IDs
3455 * assigned to operational IQs are independent of the IDs
3456 * assigned to operational OQs.
3457 */
3458 ctrl_info->event_queue.oq_id = next_oq_id++;
3459 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3460 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
3461 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
3462 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
3463 }
3464
3465 /*
3466 * Assign MSI-X table entry indexes to all queues. Note that the
3467 * interrupt for the event queue is shared with the first queue group.
3468 */
3469 ctrl_info->event_queue.int_msg_num = 0;
3470 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3471 ctrl_info->queue_groups[i].int_msg_num = i;
3472
3473 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3474 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
3475 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
3476 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
3477 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
3478 }
3479}
3480
3481static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
3482{
3483 size_t alloc_length;
3484 struct pqi_admin_queues_aligned *admin_queues_aligned;
3485 struct pqi_admin_queues *admin_queues;
3486
3487 alloc_length = sizeof(struct pqi_admin_queues_aligned) +
3488 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3489
3490 ctrl_info->admin_queue_memory_base =
3491 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3492 alloc_length,
3493 &ctrl_info->admin_queue_memory_base_dma_handle,
3494 GFP_KERNEL);
3495
3496 if (!ctrl_info->admin_queue_memory_base)
3497 return -ENOMEM;
3498
3499 ctrl_info->admin_queue_memory_length = alloc_length;
3500
3501 admin_queues = &ctrl_info->admin_queues;
3502 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
3503 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3504 admin_queues->iq_element_array =
3505 &admin_queues_aligned->iq_element_array;
3506 admin_queues->oq_element_array =
3507 &admin_queues_aligned->oq_element_array;
3508 admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003509 admin_queues->oq_pi =
3510 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;
Kevin Barnett6c223762016-06-27 16:41:00 -05003511
3512 admin_queues->iq_element_array_bus_addr =
3513 ctrl_info->admin_queue_memory_base_dma_handle +
3514 (admin_queues->iq_element_array -
3515 ctrl_info->admin_queue_memory_base);
3516 admin_queues->oq_element_array_bus_addr =
3517 ctrl_info->admin_queue_memory_base_dma_handle +
3518 (admin_queues->oq_element_array -
3519 ctrl_info->admin_queue_memory_base);
3520 admin_queues->iq_ci_bus_addr =
3521 ctrl_info->admin_queue_memory_base_dma_handle +
3522 ((void *)admin_queues->iq_ci -
3523 ctrl_info->admin_queue_memory_base);
3524 admin_queues->oq_pi_bus_addr =
3525 ctrl_info->admin_queue_memory_base_dma_handle +
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003526 ((void __iomem *)admin_queues->oq_pi -
3527 (void __iomem *)ctrl_info->admin_queue_memory_base);
Kevin Barnett6c223762016-06-27 16:41:00 -05003528
3529 return 0;
3530}
3531
3532#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ
3533#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
3534
3535static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
3536{
3537 struct pqi_device_registers __iomem *pqi_registers;
3538 struct pqi_admin_queues *admin_queues;
3539 unsigned long timeout;
3540 u8 status;
3541 u32 reg;
3542
3543 pqi_registers = ctrl_info->pqi_registers;
3544 admin_queues = &ctrl_info->admin_queues;
3545
3546 writeq((u64)admin_queues->iq_element_array_bus_addr,
3547 &pqi_registers->admin_iq_element_array_addr);
3548 writeq((u64)admin_queues->oq_element_array_bus_addr,
3549 &pqi_registers->admin_oq_element_array_addr);
3550 writeq((u64)admin_queues->iq_ci_bus_addr,
3551 &pqi_registers->admin_iq_ci_addr);
3552 writeq((u64)admin_queues->oq_pi_bus_addr,
3553 &pqi_registers->admin_oq_pi_addr);
3554
3555 reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
3556 (PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 |
3557 (admin_queues->int_msg_num << 16);
3558 writel(reg, &pqi_registers->admin_iq_num_elements);
3559 writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
3560 &pqi_registers->function_and_status_code);
3561
3562 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
3563 while (1) {
3564 status = readb(&pqi_registers->function_and_status_code);
3565 if (status == PQI_STATUS_IDLE)
3566 break;
3567 if (time_after(jiffies, timeout))
3568 return -ETIMEDOUT;
3569 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
3570 }
3571
3572 /*
3573 * The offset registers are not initialized to the correct
3574 * offsets until *after* the create admin queue pair command
3575 * completes successfully.
3576 */
3577 admin_queues->iq_pi = ctrl_info->iomem_base +
3578 PQI_DEVICE_REGISTERS_OFFSET +
3579 readq(&pqi_registers->admin_iq_pi_offset);
3580 admin_queues->oq_ci = ctrl_info->iomem_base +
3581 PQI_DEVICE_REGISTERS_OFFSET +
3582 readq(&pqi_registers->admin_oq_ci_offset);
3583
3584 return 0;
3585}
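/*
 * Admin queue creation is a register-level handshake rather than a normal
 * IU: the driver posts the element array and index addresses plus the
 * element counts, writes PQI_CREATE_ADMIN_QUEUE_PAIR to the function and
 * status code register, and then polls that register until the controller
 * reports PQI_STATUS_IDLE (or the 1-second timeout expires).  Only after
 * that do the iq_pi/oq_ci doorbell offsets become valid.
 */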
3586
3587static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
3588 struct pqi_general_admin_request *request)
3589{
3590 struct pqi_admin_queues *admin_queues;
3591 void *next_element;
3592 pqi_index_t iq_pi;
3593
3594 admin_queues = &ctrl_info->admin_queues;
3595 iq_pi = admin_queues->iq_pi_copy;
3596
3597 next_element = admin_queues->iq_element_array +
3598 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
3599
3600 memcpy(next_element, request, sizeof(*request));
3601
3602 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
3603 admin_queues->iq_pi_copy = iq_pi;
3604
3605 /*
3606 * This write notifies the controller that an IU is available to be
3607 * processed.
3608 */
3609 writel(iq_pi, admin_queues->iq_pi);
3610}
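/*
 * The admin IQ is a simple ring: iq_pi_copy is the driver's cached
 * producer index, the modulo above wraps it at PQI_ADMIN_IQ_NUM_ELEMENTS,
 * and the final writel() to the doorbell is what actually tells the
 * controller that a new element is ready.  The controller's consumer index
 * lives in the iq_ci area set up by pqi_alloc_admin_queues().
 */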
3611
Kevin Barnett13bede62017-05-03 18:55:13 -05003612#define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60
3613
Kevin Barnett6c223762016-06-27 16:41:00 -05003614static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
3615 struct pqi_general_admin_response *response)
3616{
3617 struct pqi_admin_queues *admin_queues;
3618 pqi_index_t oq_pi;
3619 pqi_index_t oq_ci;
3620 unsigned long timeout;
3621
3622 admin_queues = &ctrl_info->admin_queues;
3623 oq_ci = admin_queues->oq_ci_copy;
3624
Kevin Barnett13bede62017-05-03 18:55:13 -05003625 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies;
Kevin Barnett6c223762016-06-27 16:41:00 -05003626
3627 while (1) {
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003628 oq_pi = readl(admin_queues->oq_pi);
Kevin Barnett6c223762016-06-27 16:41:00 -05003629 if (oq_pi != oq_ci)
3630 break;
3631 if (time_after(jiffies, timeout)) {
3632 dev_err(&ctrl_info->pci_dev->dev,
3633 "timed out waiting for admin response\n");
3634 return -ETIMEDOUT;
3635 }
Kevin Barnett13bede62017-05-03 18:55:13 -05003636 if (!sis_is_firmware_running(ctrl_info))
3637 return -ENXIO;
Kevin Barnett6c223762016-06-27 16:41:00 -05003638 usleep_range(1000, 2000);
3639 }
3640
3641 memcpy(response, admin_queues->oq_element_array +
3642 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
3643
3644 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
3645 admin_queues->oq_ci_copy = oq_ci;
3646 writel(oq_ci, admin_queues->oq_ci);
3647
3648 return 0;
3649}
3650
3651static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
3652 struct pqi_queue_group *queue_group, enum pqi_io_path path,
3653 struct pqi_io_request *io_request)
3654{
3655 struct pqi_io_request *next;
3656 void *next_element;
3657 pqi_index_t iq_pi;
3658 pqi_index_t iq_ci;
3659 size_t iu_length;
3660 unsigned long flags;
3661 unsigned int num_elements_needed;
3662 unsigned int num_elements_to_end_of_queue;
3663 size_t copy_count;
3664 struct pqi_iu_header *request;
3665
3666 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
3667
Kevin Barnett376fb882017-05-03 18:54:43 -05003668 if (io_request) {
3669 io_request->queue_group = queue_group;
Kevin Barnett6c223762016-06-27 16:41:00 -05003670 list_add_tail(&io_request->request_list_entry,
3671 &queue_group->request_list[path]);
Kevin Barnett376fb882017-05-03 18:54:43 -05003672 }
Kevin Barnett6c223762016-06-27 16:41:00 -05003673
3674 iq_pi = queue_group->iq_pi_copy[path];
3675
3676 list_for_each_entry_safe(io_request, next,
3677 &queue_group->request_list[path], request_list_entry) {
3678
3679 request = io_request->iu;
3680
3681 iu_length = get_unaligned_le16(&request->iu_length) +
3682 PQI_REQUEST_HEADER_LENGTH;
3683 num_elements_needed =
3684 DIV_ROUND_UP(iu_length,
3685 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3686
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003687 iq_ci = readl(queue_group->iq_ci[path]);
Kevin Barnett6c223762016-06-27 16:41:00 -05003688
3689 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
3690 ctrl_info->num_elements_per_iq))
3691 break;
3692
3693 put_unaligned_le16(queue_group->oq_id,
3694 &request->response_queue_id);
3695
3696 next_element = queue_group->iq_element_array[path] +
3697 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3698
3699 num_elements_to_end_of_queue =
3700 ctrl_info->num_elements_per_iq - iq_pi;
3701
3702 if (num_elements_needed <= num_elements_to_end_of_queue) {
3703 memcpy(next_element, request, iu_length);
3704 } else {
3705 copy_count = num_elements_to_end_of_queue *
3706 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
3707 memcpy(next_element, request, copy_count);
3708 memcpy(queue_group->iq_element_array[path],
3709 (u8 *)request + copy_count,
3710 iu_length - copy_count);
3711 }
3712
3713 iq_pi = (iq_pi + num_elements_needed) %
3714 ctrl_info->num_elements_per_iq;
3715
3716 list_del(&io_request->request_list_entry);
3717 }
3718
3719 if (iq_pi != queue_group->iq_pi_copy[path]) {
3720 queue_group->iq_pi_copy[path] = iq_pi;
3721 /*
3722 * This write notifies the controller that one or more IUs are
3723 * available to be processed.
3724 */
3725 writel(iq_pi, queue_group->iq_pi[path]);
3726 }
3727
3728 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
3729}
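/*
 * Two details above are worth calling out.  First, an IU that would run
 * past the end of the element array is split into two memcpy()s, with the
 * tail wrapping back to the start of the array.  Second, admission is
 * gated on pqi_num_elements_free() (defined earlier in this file); the
 * sketch below restates that circular-buffer arithmetic for reference
 * (illustrative only, not part of the driver):
 */
#if 0
static unsigned int example_num_elements_free(unsigned int pi,
	unsigned int ci, unsigned int elements_in_queue)
{
	/* one slot is always left unused so that pi == ci means "empty" */
	if (pi >= ci)
		return elements_in_queue - (pi - ci) - 1;

	return ci - pi - 1;
}
#endif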
3730
Kevin Barnett1f37e992017-05-03 18:53:24 -05003731#define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10
3732
3733static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
3734 struct completion *wait)
3735{
3736 int rc;
Kevin Barnett1f37e992017-05-03 18:53:24 -05003737
3738 while (1) {
3739 if (wait_for_completion_io_timeout(wait,
3740 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) {
3741 rc = 0;
3742 break;
3743 }
3744
3745 pqi_check_ctrl_health(ctrl_info);
3746 if (pqi_ctrl_offline(ctrl_info)) {
3747 rc = -ENXIO;
3748 break;
3749 }
Kevin Barnett1f37e992017-05-03 18:53:24 -05003750 }
3751
3752 return rc;
3753}
3754
Kevin Barnett6c223762016-06-27 16:41:00 -05003755static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
3756 void *context)
3757{
3758 struct completion *waiting = context;
3759
3760 complete(waiting);
3761}
3762
Kevin Barnett26b390a2018-06-18 13:22:48 -05003763static int pqi_process_raid_io_error_synchronous(struct pqi_raid_error_info
3764 *error_info)
3765{
3766 int rc = -EIO;
3767
3768 switch (error_info->data_out_result) {
3769 case PQI_DATA_IN_OUT_GOOD:
3770 if (error_info->status == SAM_STAT_GOOD)
3771 rc = 0;
3772 break;
3773 case PQI_DATA_IN_OUT_UNDERFLOW:
3774 if (error_info->status == SAM_STAT_GOOD ||
3775 error_info->status == SAM_STAT_CHECK_CONDITION)
3776 rc = 0;
3777 break;
3778 case PQI_DATA_IN_OUT_ABORTED:
3779 rc = PQI_CMD_STATUS_ABORTED;
3780 break;
3781 }
3782
3783 return rc;
3784}
3785
Kevin Barnett6c223762016-06-27 16:41:00 -05003786static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
3787 struct pqi_iu_header *request, unsigned int flags,
3788 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
3789{
Kevin Barnett957c5ab2018-06-18 13:22:42 -05003790 int rc = 0;
Kevin Barnett6c223762016-06-27 16:41:00 -05003791 struct pqi_io_request *io_request;
3792 unsigned long start_jiffies;
3793 unsigned long msecs_blocked;
3794 size_t iu_length;
Kevin Barnett957c5ab2018-06-18 13:22:42 -05003795 DECLARE_COMPLETION_ONSTACK(wait);
Kevin Barnett6c223762016-06-27 16:41:00 -05003796
3797 /*
3798 * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value
3799 * are mutually exclusive.
3800 */
3801
3802 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
3803 if (down_interruptible(&ctrl_info->sync_request_sem))
3804 return -ERESTARTSYS;
3805 } else {
3806 if (timeout_msecs == NO_TIMEOUT) {
3807 down(&ctrl_info->sync_request_sem);
3808 } else {
3809 start_jiffies = jiffies;
3810 if (down_timeout(&ctrl_info->sync_request_sem,
3811 msecs_to_jiffies(timeout_msecs)))
3812 return -ETIMEDOUT;
3813 msecs_blocked =
3814 jiffies_to_msecs(jiffies - start_jiffies);
3815 if (msecs_blocked >= timeout_msecs)
3816 return -ETIMEDOUT;
3817 timeout_msecs -= msecs_blocked;
3818 }
3819 }
3820
Kevin Barnett7561a7e2017-05-03 18:52:58 -05003821 pqi_ctrl_busy(ctrl_info);
3822 timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs);
3823 if (timeout_msecs == 0) {
Kevin Barnett957c5ab2018-06-18 13:22:42 -05003824 pqi_ctrl_unbusy(ctrl_info);
Kevin Barnett7561a7e2017-05-03 18:52:58 -05003825 rc = -ETIMEDOUT;
3826 goto out;
3827 }
3828
Kevin Barnett376fb882017-05-03 18:54:43 -05003829 if (pqi_ctrl_offline(ctrl_info)) {
Kevin Barnett957c5ab2018-06-18 13:22:42 -05003830 pqi_ctrl_unbusy(ctrl_info);
Kevin Barnett376fb882017-05-03 18:54:43 -05003831 rc = -ENXIO;
3832 goto out;
3833 }
3834
Kevin Barnett6c223762016-06-27 16:41:00 -05003835 io_request = pqi_alloc_io_request(ctrl_info);
3836
3837 put_unaligned_le16(io_request->index,
3838 &(((struct pqi_raid_path_request *)request)->request_id));
3839
3840 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
3841 ((struct pqi_raid_path_request *)request)->error_index =
3842 ((struct pqi_raid_path_request *)request)->request_id;
3843
3844 iu_length = get_unaligned_le16(&request->iu_length) +
3845 PQI_REQUEST_HEADER_LENGTH;
3846 memcpy(io_request->iu, request, iu_length);
3847
Kevin Barnett957c5ab2018-06-18 13:22:42 -05003848 io_request->io_complete_callback = pqi_raid_synchronous_complete;
3849 io_request->context = &wait;
3850
3851 pqi_start_io(ctrl_info,
3852 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
3853 io_request);
3854
3855 pqi_ctrl_unbusy(ctrl_info);
3856
3857 if (timeout_msecs == NO_TIMEOUT) {
3858 pqi_wait_for_completion_io(ctrl_info, &wait);
3859 } else {
3860 if (!wait_for_completion_io_timeout(&wait,
3861 msecs_to_jiffies(timeout_msecs))) {
3862 dev_warn(&ctrl_info->pci_dev->dev,
3863 "command timed out\n");
3864 rc = -ETIMEDOUT;
3865 }
3866 }
Kevin Barnett6c223762016-06-27 16:41:00 -05003867
3868 if (error_info) {
3869 if (io_request->error_info)
3870 memcpy(error_info, io_request->error_info,
3871 sizeof(*error_info));
3872 else
3873 memset(error_info, 0, sizeof(*error_info));
3874 } else if (rc == 0 && io_request->error_info) {
Kevin Barnett26b390a2018-06-18 13:22:48 -05003875 rc = pqi_process_raid_io_error_synchronous(
3876 io_request->error_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05003877 }
3878
3879 pqi_free_io_request(io_request);
3880
Kevin Barnett7561a7e2017-05-03 18:52:58 -05003881out:
Kevin Barnett6c223762016-06-27 16:41:00 -05003882 up(&ctrl_info->sync_request_sem);
3883
3884 return rc;
3885}
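/*
 * Summary of the synchronous RAID path above: sync_request_sem bounds the
 * number of concurrent synchronous requests, the request is posted to the
 * RAID IQ of the default queue group, and completion is signalled through
 * the on-stack completion by pqi_raid_synchronous_complete().  With
 * NO_TIMEOUT the wait also watches controller health via
 * pqi_wait_for_completion_io(); otherwise a plain timed wait is used and
 * -ETIMEDOUT is returned if it expires.
 */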
3886
3887static int pqi_validate_admin_response(
3888 struct pqi_general_admin_response *response, u8 expected_function_code)
3889{
3890 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
3891 return -EINVAL;
3892
3893 if (get_unaligned_le16(&response->header.iu_length) !=
3894 PQI_GENERAL_ADMIN_IU_LENGTH)
3895 return -EINVAL;
3896
3897 if (response->function_code != expected_function_code)
3898 return -EINVAL;
3899
3900 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
3901 return -EINVAL;
3902
3903 return 0;
3904}
3905
3906static int pqi_submit_admin_request_synchronous(
3907 struct pqi_ctrl_info *ctrl_info,
3908 struct pqi_general_admin_request *request,
3909 struct pqi_general_admin_response *response)
3910{
3911 int rc;
3912
3913 pqi_submit_admin_request(ctrl_info, request);
3914
3915 rc = pqi_poll_for_admin_response(ctrl_info, response);
3916
3917 if (rc == 0)
3918 rc = pqi_validate_admin_response(response,
3919 request->function_code);
3920
3921 return rc;
3922}
3923
3924static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
3925{
3926 int rc;
3927 struct pqi_general_admin_request request;
3928 struct pqi_general_admin_response response;
3929 struct pqi_device_capability *capability;
3930 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
3931
3932 capability = kmalloc(sizeof(*capability), GFP_KERNEL);
3933 if (!capability)
3934 return -ENOMEM;
3935
3936 memset(&request, 0, sizeof(request));
3937
3938 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3939 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3940 &request.header.iu_length);
3941 request.function_code =
3942 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
3943 put_unaligned_le32(sizeof(*capability),
3944 &request.data.report_device_capability.buffer_length);
3945
3946 rc = pqi_map_single(ctrl_info->pci_dev,
3947 &request.data.report_device_capability.sg_descriptor,
3948 capability, sizeof(*capability),
Christoph Hellwig6917a9c2018-10-11 09:47:59 +02003949 DMA_FROM_DEVICE);
Kevin Barnett6c223762016-06-27 16:41:00 -05003950 if (rc)
3951 goto out;
3952
3953 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3954 &response);
3955
3956 pqi_pci_unmap(ctrl_info->pci_dev,
3957 &request.data.report_device_capability.sg_descriptor, 1,
Christoph Hellwig6917a9c2018-10-11 09:47:59 +02003958 DMA_FROM_DEVICE);
Kevin Barnett6c223762016-06-27 16:41:00 -05003959
3960 if (rc)
3961 goto out;
3962
3963 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
3964 rc = -EIO;
3965 goto out;
3966 }
3967
3968 ctrl_info->max_inbound_queues =
3969 get_unaligned_le16(&capability->max_inbound_queues);
3970 ctrl_info->max_elements_per_iq =
3971 get_unaligned_le16(&capability->max_elements_per_iq);
3972 ctrl_info->max_iq_element_length =
3973 get_unaligned_le16(&capability->max_iq_element_length)
3974 * 16;
3975 ctrl_info->max_outbound_queues =
3976 get_unaligned_le16(&capability->max_outbound_queues);
3977 ctrl_info->max_elements_per_oq =
3978 get_unaligned_le16(&capability->max_elements_per_oq);
3979 ctrl_info->max_oq_element_length =
3980 get_unaligned_le16(&capability->max_oq_element_length)
3981 * 16;
3982
3983 sop_iu_layer_descriptor =
3984 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
3985
3986 ctrl_info->max_inbound_iu_length_per_firmware =
3987 get_unaligned_le16(
3988 &sop_iu_layer_descriptor->max_inbound_iu_length);
3989 ctrl_info->inbound_spanning_supported =
3990 sop_iu_layer_descriptor->inbound_spanning_supported;
3991 ctrl_info->outbound_spanning_supported =
3992 sop_iu_layer_descriptor->outbound_spanning_supported;
3993
3994out:
3995 kfree(capability);
3996
3997 return rc;
3998}
3999
4000static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
4001{
4002 if (ctrl_info->max_iq_element_length <
4003 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4004 dev_err(&ctrl_info->pci_dev->dev,
4005 "max. inbound queue element length of %d is less than the required length of %d\n",
4006 ctrl_info->max_iq_element_length,
4007 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4008 return -EINVAL;
4009 }
4010
4011 if (ctrl_info->max_oq_element_length <
4012 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
4013 dev_err(&ctrl_info->pci_dev->dev,
4014 "max. outbound queue element length of %d is less than the required length of %d\n",
4015 ctrl_info->max_oq_element_length,
4016 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
4017 return -EINVAL;
4018 }
4019
4020 if (ctrl_info->max_inbound_iu_length_per_firmware <
4021 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4022 dev_err(&ctrl_info->pci_dev->dev,
4023 "max. inbound IU length of %u is less than the min. required length of %d\n",
4024 ctrl_info->max_inbound_iu_length_per_firmware,
4025 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4026 return -EINVAL;
4027 }
4028
Kevin Barnett77668f42016-08-31 14:54:23 -05004029 if (!ctrl_info->inbound_spanning_supported) {
4030 dev_err(&ctrl_info->pci_dev->dev,
4031 "the controller does not support inbound spanning\n");
4032 return -EINVAL;
4033 }
4034
4035 if (ctrl_info->outbound_spanning_supported) {
4036 dev_err(&ctrl_info->pci_dev->dev,
4037 "the controller supports outbound spanning but this driver does not\n");
4038 return -EINVAL;
4039 }
4040
Kevin Barnett6c223762016-06-27 16:41:00 -05004041 return 0;
4042}
4043
Kevin Barnett6c223762016-06-27 16:41:00 -05004044static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
4045{
4046 int rc;
4047 struct pqi_event_queue *event_queue;
4048 struct pqi_general_admin_request request;
4049 struct pqi_general_admin_response response;
4050
4051 event_queue = &ctrl_info->event_queue;
4052
4053 /*
4054 * Create OQ (Outbound Queue - device to host queue) dedicated
4055 * to event notifications.

4056 */
4057 memset(&request, 0, sizeof(request));
4058 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4059 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4060 &request.header.iu_length);
4061 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4062 put_unaligned_le16(event_queue->oq_id,
4063 &request.data.create_operational_oq.queue_id);
4064 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
4065 &request.data.create_operational_oq.element_array_addr);
4066 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
4067 &request.data.create_operational_oq.pi_addr);
4068 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
4069 &request.data.create_operational_oq.num_elements);
4070 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
4071 &request.data.create_operational_oq.element_length);
4072 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4073 put_unaligned_le16(event_queue->int_msg_num,
4074 &request.data.create_operational_oq.int_msg_num);
4075
4076 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4077 &response);
4078 if (rc)
4079 return rc;
4080
4081 event_queue->oq_ci = ctrl_info->iomem_base +
4082 PQI_DEVICE_REGISTERS_OFFSET +
4083 get_unaligned_le64(
4084 &response.data.create_operational_oq.oq_ci_offset);
4085
4086 return 0;
4087}
4088
Kevin Barnett061ef062017-05-03 18:53:05 -05004089static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
4090 unsigned int group_number)
Kevin Barnett6c223762016-06-27 16:41:00 -05004091{
Kevin Barnett6c223762016-06-27 16:41:00 -05004092 int rc;
4093 struct pqi_queue_group *queue_group;
4094 struct pqi_general_admin_request request;
4095 struct pqi_general_admin_response response;
4096
Kevin Barnett061ef062017-05-03 18:53:05 -05004097 queue_group = &ctrl_info->queue_groups[group_number];
Kevin Barnett6c223762016-06-27 16:41:00 -05004098
4099 /*
4100 * Create IQ (Inbound Queue - host to device queue) for
4101 * RAID path.
4102 */
4103 memset(&request, 0, sizeof(request));
4104 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4105 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4106 &request.header.iu_length);
4107 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4108 put_unaligned_le16(queue_group->iq_id[RAID_PATH],
4109 &request.data.create_operational_iq.queue_id);
4110 put_unaligned_le64(
4111 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
4112 &request.data.create_operational_iq.element_array_addr);
4113 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
4114 &request.data.create_operational_iq.ci_addr);
4115 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4116 &request.data.create_operational_iq.num_elements);
4117 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4118 &request.data.create_operational_iq.element_length);
4119 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4120
4121 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4122 &response);
4123 if (rc) {
4124 dev_err(&ctrl_info->pci_dev->dev,
4125 "error creating inbound RAID queue\n");
4126 return rc;
4127 }
4128
4129 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
4130 PQI_DEVICE_REGISTERS_OFFSET +
4131 get_unaligned_le64(
4132 &response.data.create_operational_iq.iq_pi_offset);
4133
4134 /*
4135 * Create IQ (Inbound Queue - host to device queue) for
4136 * Advanced I/O (AIO) path.
4137 */
4138 memset(&request, 0, sizeof(request));
4139 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4140 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4141 &request.header.iu_length);
4142 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4143 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4144 &request.data.create_operational_iq.queue_id);
4145 put_unaligned_le64((u64)queue_group->
4146 iq_element_array_bus_addr[AIO_PATH],
4147 &request.data.create_operational_iq.element_array_addr);
4148 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
4149 &request.data.create_operational_iq.ci_addr);
4150 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4151 &request.data.create_operational_iq.num_elements);
4152 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4153 &request.data.create_operational_iq.element_length);
4154 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4155
4156 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4157 &response);
4158 if (rc) {
4159 dev_err(&ctrl_info->pci_dev->dev,
4160 "error creating inbound AIO queue\n");
Kevin Barnett339faa82018-03-21 13:32:31 -05004161 return rc;
Kevin Barnett6c223762016-06-27 16:41:00 -05004162 }
4163
4164 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
4165 PQI_DEVICE_REGISTERS_OFFSET +
4166 get_unaligned_le64(
4167 &response.data.create_operational_iq.iq_pi_offset);
4168
4169 /*
4170 * Designate the 2nd IQ as the AIO path. By default, all IQs are
4171 * assumed to be for RAID path I/O unless we change the queue's
4172 * property.
4173 */
4174 memset(&request, 0, sizeof(request));
4175 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4176 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4177 &request.header.iu_length);
4178 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
4179 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4180 &request.data.change_operational_iq_properties.queue_id);
4181 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
4182 &request.data.change_operational_iq_properties.vendor_specific);
4183
4184 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4185 &response);
4186 if (rc) {
4187 dev_err(&ctrl_info->pci_dev->dev,
4188 "error changing queue property\n");
Kevin Barnett339faa82018-03-21 13:32:31 -05004189 return rc;
Kevin Barnett6c223762016-06-27 16:41:00 -05004190 }
4191
4192 /*
4193 * Create OQ (Outbound Queue - device to host queue).
4194 */
4195 memset(&request, 0, sizeof(request));
4196 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4197 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4198 &request.header.iu_length);
4199 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4200 put_unaligned_le16(queue_group->oq_id,
4201 &request.data.create_operational_oq.queue_id);
4202 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
4203 &request.data.create_operational_oq.element_array_addr);
4204 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
4205 &request.data.create_operational_oq.pi_addr);
4206 put_unaligned_le16(ctrl_info->num_elements_per_oq,
4207 &request.data.create_operational_oq.num_elements);
4208 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
4209 &request.data.create_operational_oq.element_length);
4210 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4211 put_unaligned_le16(queue_group->int_msg_num,
4212 &request.data.create_operational_oq.int_msg_num);
4213
4214 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4215 &response);
4216 if (rc) {
4217 dev_err(&ctrl_info->pci_dev->dev,
4218 "error creating outbound queue\n");
Kevin Barnett339faa82018-03-21 13:32:31 -05004219 return rc;
Kevin Barnett6c223762016-06-27 16:41:00 -05004220 }
4221
4222 queue_group->oq_ci = ctrl_info->iomem_base +
4223 PQI_DEVICE_REGISTERS_OFFSET +
4224 get_unaligned_le64(
4225 &response.data.create_operational_oq.oq_ci_offset);
4226
Kevin Barnett6c223762016-06-27 16:41:00 -05004227 return 0;
Kevin Barnett6c223762016-06-27 16:41:00 -05004228}
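/*
 * Each queue group therefore costs four admin commands: create the RAID
 * IQ, create the AIO IQ, flip the AIO IQ's property bit so the firmware
 * treats it as an AIO queue, and create the shared OQ to which completions
 * for both IQs are posted.
 */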
4229
4230static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
4231{
4232 int rc;
4233 unsigned int i;
4234
4235 rc = pqi_create_event_queue(ctrl_info);
4236 if (rc) {
4237 dev_err(&ctrl_info->pci_dev->dev,
4238 "error creating event queue\n");
4239 return rc;
4240 }
4241
4242 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
Kevin Barnett061ef062017-05-03 18:53:05 -05004243 rc = pqi_create_queue_group(ctrl_info, i);
Kevin Barnett6c223762016-06-27 16:41:00 -05004244 if (rc) {
4245 dev_err(&ctrl_info->pci_dev->dev,
4246 "error creating queue group number %u/%u\n",
4247 i, ctrl_info->num_queue_groups);
4248 return rc;
4249 }
4250 }
4251
4252 return 0;
4253}
4254
4255#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
4256 (offsetof(struct pqi_event_config, descriptors) + \
4257 (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
4258
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05004259static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
4260 bool enable_events)
Kevin Barnett6c223762016-06-27 16:41:00 -05004261{
4262 int rc;
4263 unsigned int i;
4264 struct pqi_event_config *event_config;
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05004265 struct pqi_event_descriptor *event_descriptor;
Kevin Barnett6c223762016-06-27 16:41:00 -05004266 struct pqi_general_management_request request;
4267
4268 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4269 GFP_KERNEL);
4270 if (!event_config)
4271 return -ENOMEM;
4272
4273 memset(&request, 0, sizeof(request));
4274
4275 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
4276 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4277 data.report_event_configuration.sg_descriptors[1]) -
4278 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4279 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4280 &request.data.report_event_configuration.buffer_length);
4281
4282 rc = pqi_map_single(ctrl_info->pci_dev,
4283 request.data.report_event_configuration.sg_descriptors,
4284 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
Christoph Hellwig6917a9c2018-10-11 09:47:59 +02004285 DMA_FROM_DEVICE);
Kevin Barnett6c223762016-06-27 16:41:00 -05004286 if (rc)
4287 goto out;
4288
4289 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
4290 0, NULL, NO_TIMEOUT);
4291
4292 pqi_pci_unmap(ctrl_info->pci_dev,
4293 request.data.report_event_configuration.sg_descriptors, 1,
Christoph Hellwig6917a9c2018-10-11 09:47:59 +02004294 DMA_FROM_DEVICE);
Kevin Barnett6c223762016-06-27 16:41:00 -05004295
4296 if (rc)
4297 goto out;
4298
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05004299 for (i = 0; i < event_config->num_event_descriptors; i++) {
4300 event_descriptor = &event_config->descriptors[i];
4301 if (enable_events &&
4302 pqi_is_supported_event(event_descriptor->event_type))
4303 put_unaligned_le16(ctrl_info->event_queue.oq_id,
4304 &event_descriptor->oq_id);
4305 else
4306 put_unaligned_le16(0, &event_descriptor->oq_id);
4307 }
Kevin Barnett6c223762016-06-27 16:41:00 -05004308
4309 memset(&request, 0, sizeof(request));
4310
4311 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
4312 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4313 data.report_event_configuration.sg_descriptors[1]) -
4314 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4315 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4316 &request.data.report_event_configuration.buffer_length);
4317
4318 rc = pqi_map_single(ctrl_info->pci_dev,
4319 request.data.report_event_configuration.sg_descriptors,
4320 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
Christoph Hellwig6917a9c2018-10-11 09:47:59 +02004321 DMA_TO_DEVICE);
Kevin Barnett6c223762016-06-27 16:41:00 -05004322 if (rc)
4323 goto out;
4324
4325 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
4326 NULL, NO_TIMEOUT);
4327
4328 pqi_pci_unmap(ctrl_info->pci_dev,
4329 request.data.report_event_configuration.sg_descriptors, 1,
Christoph Hellwig6917a9c2018-10-11 09:47:59 +02004330 DMA_TO_DEVICE);
Kevin Barnett6c223762016-06-27 16:41:00 -05004331
4332out:
4333 kfree(event_config);
4334
4335 return rc;
4336}
4337
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05004338static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
4339{
4340 return pqi_configure_events(ctrl_info, true);
4341}
4342
4343static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info)
4344{
4345 return pqi_configure_events(ctrl_info, false);
4346}
4347
Kevin Barnett6c223762016-06-27 16:41:00 -05004348static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
4349{
4350 unsigned int i;
4351 struct device *dev;
4352 size_t sg_chain_buffer_length;
4353 struct pqi_io_request *io_request;
4354
4355 if (!ctrl_info->io_request_pool)
4356 return;
4357
4358 dev = &ctrl_info->pci_dev->dev;
4359 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4360 io_request = ctrl_info->io_request_pool;
4361
4362 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4363 kfree(io_request->iu);
4364 if (!io_request->sg_chain_buffer)
4365 break;
4366 dma_free_coherent(dev, sg_chain_buffer_length,
4367 io_request->sg_chain_buffer,
4368 io_request->sg_chain_buffer_dma_handle);
4369 io_request++;
4370 }
4371
4372 kfree(ctrl_info->io_request_pool);
4373 ctrl_info->io_request_pool = NULL;
4374}
4375
4376static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
4377{
4378 ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
4379 ctrl_info->error_buffer_length,
4380 &ctrl_info->error_buffer_dma_handle, GFP_KERNEL);
4381
4382 if (!ctrl_info->error_buffer)
4383 return -ENOMEM;
4384
4385 return 0;
4386}
4387
4388static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
4389{
4390 unsigned int i;
4391 void *sg_chain_buffer;
4392 size_t sg_chain_buffer_length;
4393 dma_addr_t sg_chain_buffer_dma_handle;
4394 struct device *dev;
4395 struct pqi_io_request *io_request;
4396
Kees Cook6396bb22018-06-12 14:03:40 -07004397 ctrl_info->io_request_pool =
4398 kcalloc(ctrl_info->max_io_slots,
4399 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
Kevin Barnett6c223762016-06-27 16:41:00 -05004400
4401 if (!ctrl_info->io_request_pool) {
4402 dev_err(&ctrl_info->pci_dev->dev,
4403 "failed to allocate I/O request pool\n");
4404 goto error;
4405 }
4406
4407 dev = &ctrl_info->pci_dev->dev;
4408 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4409 io_request = ctrl_info->io_request_pool;
4410
4411 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4412 io_request->iu =
4413 kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
4414
4415 if (!io_request->iu) {
4416 dev_err(&ctrl_info->pci_dev->dev,
4417 "failed to allocate IU buffers\n");
4418 goto error;
4419 }
4420
4421 sg_chain_buffer = dma_alloc_coherent(dev,
4422 sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
4423 GFP_KERNEL);
4424
4425 if (!sg_chain_buffer) {
4426 dev_err(&ctrl_info->pci_dev->dev,
4427 "failed to allocate PQI scatter-gather chain buffers\n");
4428 goto error;
4429 }
4430
4431 io_request->index = i;
4432 io_request->sg_chain_buffer = sg_chain_buffer;
4433 io_request->sg_chain_buffer_dma_handle =
4434 sg_chain_buffer_dma_handle;
4435 io_request++;
4436 }
4437
4438 return 0;
4439
4440error:
4441 pqi_free_all_io_requests(ctrl_info);
4442
4443 return -ENOMEM;
4444}
4445
4446/*
4447 * Calculate required resources that are sized based on max. outstanding
4448 * requests and max. transfer size.
4449 */
4450
4451static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
4452{
4453 u32 max_transfer_size;
4454 u32 max_sg_entries;
4455
4456 ctrl_info->scsi_ml_can_queue =
4457 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
4458 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
4459
4460 ctrl_info->error_buffer_length =
4461 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
4462
Kevin Barnettd727a772017-05-03 18:54:25 -05004463 if (reset_devices)
4464 max_transfer_size = min(ctrl_info->max_transfer_size,
4465 PQI_MAX_TRANSFER_SIZE_KDUMP);
4466 else
4467 max_transfer_size = min(ctrl_info->max_transfer_size,
4468 PQI_MAX_TRANSFER_SIZE);
Kevin Barnett6c223762016-06-27 16:41:00 -05004469
4470 max_sg_entries = max_transfer_size / PAGE_SIZE;
4471
4472 /* +1 to cover when the buffer is not page-aligned. */
4473 max_sg_entries++;
4474
4475 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
4476
4477 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
4478
4479 ctrl_info->sg_chain_buffer_length =
Kevin Barnette1d213b2017-05-03 18:53:18 -05004480 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
4481 PQI_EXTRA_SGL_MEMORY;
Kevin Barnett6c223762016-06-27 16:41:00 -05004482 ctrl_info->sg_tablesize = max_sg_entries;
4483 ctrl_info->max_sectors = max_transfer_size / 512;
4484}
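/*
 * Worked example for the sizing above (numbers are illustrative and assume
 * a 4 KiB PAGE_SIZE): a 1 MiB transfer limit gives 256 SG entries, plus one
 * for a non-page-aligned buffer, i.e. 257.  After clamping to the
 * controller's max_sg_entries, max_transfer_size is recomputed as
 * (max_sg_entries - 1) * PAGE_SIZE so the "+1 alignment" entry is always
 * available, and max_sectors is simply that size divided by 512.
 */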
4485
4486static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
4487{
Kevin Barnett6c223762016-06-27 16:41:00 -05004488 int num_queue_groups;
4489 u16 num_elements_per_iq;
4490 u16 num_elements_per_oq;
4491
Kevin Barnettd727a772017-05-03 18:54:25 -05004492 if (reset_devices) {
4493 num_queue_groups = 1;
4494 } else {
4495 int num_cpus;
4496 int max_queue_groups;
Kevin Barnett6c223762016-06-27 16:41:00 -05004497
Kevin Barnettd727a772017-05-03 18:54:25 -05004498 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
4499 ctrl_info->max_outbound_queues - 1);
4500 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
4501
4502 num_cpus = num_online_cpus();
4503 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
4504 num_queue_groups = min(num_queue_groups, max_queue_groups);
4505 }
Kevin Barnett6c223762016-06-27 16:41:00 -05004506
4507 ctrl_info->num_queue_groups = num_queue_groups;
Kevin Barnett061ef062017-05-03 18:53:05 -05004508 ctrl_info->max_hw_queue_index = num_queue_groups - 1;
Kevin Barnett6c223762016-06-27 16:41:00 -05004509
Kevin Barnett77668f42016-08-31 14:54:23 -05004510 /*
4511 * Make sure that the max. inbound IU length is an even multiple
4512 * of our inbound element length.
4513 */
4514 ctrl_info->max_inbound_iu_length =
4515 (ctrl_info->max_inbound_iu_length_per_firmware /
4516 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
4517 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
Kevin Barnett6c223762016-06-27 16:41:00 -05004518
4519 num_elements_per_iq =
4520 (ctrl_info->max_inbound_iu_length /
4521 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4522
4523 /* Add one because one element in each queue is unusable. */
4524 num_elements_per_iq++;
4525
4526 num_elements_per_iq = min(num_elements_per_iq,
4527 ctrl_info->max_elements_per_iq);
4528
4529 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
4530 num_elements_per_oq = min(num_elements_per_oq,
4531 ctrl_info->max_elements_per_oq);
4532
4533 ctrl_info->num_elements_per_iq = num_elements_per_iq;
4534 ctrl_info->num_elements_per_oq = num_elements_per_oq;
4535
4536 ctrl_info->max_sg_per_iu =
4537 ((ctrl_info->max_inbound_iu_length -
4538 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
4539 sizeof(struct pqi_sg_descriptor)) +
4540 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
4541}
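/*
 * The "* 2" in the outbound element count above reflects the queue group
 * layout: every group has two inbound queues (RAID and AIO) but only one
 * outbound queue, so the OQ is sized to hold roughly one response element
 * for every element of both IQs combined.
 */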
4542
4543static inline void pqi_set_sg_descriptor(
4544 struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
4545{
4546 u64 address = (u64)sg_dma_address(sg);
4547 unsigned int length = sg_dma_len(sg);
4548
4549 put_unaligned_le64(address, &sg_descriptor->address);
4550 put_unaligned_le32(length, &sg_descriptor->length);
4551 put_unaligned_le32(0, &sg_descriptor->flags);
4552}
4553
4554static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
4555 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
4556 struct pqi_io_request *io_request)
4557{
4558 int i;
4559 u16 iu_length;
4560 int sg_count;
4561 bool chained;
4562 unsigned int num_sg_in_iu;
4563 unsigned int max_sg_per_iu;
4564 struct scatterlist *sg;
4565 struct pqi_sg_descriptor *sg_descriptor;
4566
4567 sg_count = scsi_dma_map(scmd);
4568 if (sg_count < 0)
4569 return sg_count;
4570
4571 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
4572 PQI_REQUEST_HEADER_LENGTH;
4573
4574 if (sg_count == 0)
4575 goto out;
4576
4577 sg = scsi_sglist(scmd);
4578 sg_descriptor = request->sg_descriptors;
4579 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4580 chained = false;
4581 num_sg_in_iu = 0;
4582 i = 0;
4583
4584 while (1) {
4585 pqi_set_sg_descriptor(sg_descriptor, sg);
4586 if (!chained)
4587 num_sg_in_iu++;
4588 i++;
4589 if (i == sg_count)
4590 break;
4591 sg_descriptor++;
4592 if (i == max_sg_per_iu) {
4593 put_unaligned_le64(
4594 (u64)io_request->sg_chain_buffer_dma_handle,
4595 &sg_descriptor->address);
4596 put_unaligned_le32((sg_count - num_sg_in_iu)
4597 * sizeof(*sg_descriptor),
4598 &sg_descriptor->length);
4599 put_unaligned_le32(CISS_SG_CHAIN,
4600 &sg_descriptor->flags);
4601 chained = true;
4602 num_sg_in_iu++;
4603 sg_descriptor = io_request->sg_chain_buffer;
4604 }
4605 sg = sg_next(sg);
4606 }
4607
4608 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4609 request->partial = chained;
4610 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4611
4612out:
4613 put_unaligned_le16(iu_length, &request->header.iu_length);
4614
4615 return 0;
4616}
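/*
 * SG list layout produced above: up to max_sg_per_iu - 1 descriptors are
 * embedded in the IU itself; if more are needed, the last embedded slot is
 * turned into a CISS_SG_CHAIN descriptor that points at the per-request
 * DMA-coherent chain buffer, and the remaining entries continue there.
 * Whichever descriptor is written last gets CISS_SG_LAST, and
 * request->partial records whether chaining was used.
 */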
4617
4618static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
4619 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
4620 struct pqi_io_request *io_request)
4621{
4622 int i;
4623 u16 iu_length;
4624 int sg_count;
Kevin Barnetta60eec02016-08-31 14:54:11 -05004625 bool chained;
4626 unsigned int num_sg_in_iu;
4627 unsigned int max_sg_per_iu;
Kevin Barnett6c223762016-06-27 16:41:00 -05004628 struct scatterlist *sg;
4629 struct pqi_sg_descriptor *sg_descriptor;
4630
4631 sg_count = scsi_dma_map(scmd);
4632 if (sg_count < 0)
4633 return sg_count;
Kevin Barnetta60eec02016-08-31 14:54:11 -05004634
4635 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
4636 PQI_REQUEST_HEADER_LENGTH;
4637 num_sg_in_iu = 0;
4638
Kevin Barnett6c223762016-06-27 16:41:00 -05004639 if (sg_count == 0)
4640 goto out;
4641
Kevin Barnetta60eec02016-08-31 14:54:11 -05004642 sg = scsi_sglist(scmd);
4643 sg_descriptor = request->sg_descriptors;
4644 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4645 chained = false;
4646 i = 0;
Kevin Barnett6c223762016-06-27 16:41:00 -05004647
Kevin Barnetta60eec02016-08-31 14:54:11 -05004648 while (1) {
4649 pqi_set_sg_descriptor(sg_descriptor, sg);
4650 if (!chained)
4651 num_sg_in_iu++;
4652 i++;
4653 if (i == sg_count)
4654 break;
4655 sg_descriptor++;
4656 if (i == max_sg_per_iu) {
4657 put_unaligned_le64(
4658 (u64)io_request->sg_chain_buffer_dma_handle,
4659 &sg_descriptor->address);
4660 put_unaligned_le32((sg_count - num_sg_in_iu)
4661 * sizeof(*sg_descriptor),
4662 &sg_descriptor->length);
4663 put_unaligned_le32(CISS_SG_CHAIN,
4664 &sg_descriptor->flags);
4665 chained = true;
4666 num_sg_in_iu++;
4667 sg_descriptor = io_request->sg_chain_buffer;
Kevin Barnett6c223762016-06-27 16:41:00 -05004668 }
Kevin Barnetta60eec02016-08-31 14:54:11 -05004669 sg = sg_next(sg);
Kevin Barnett6c223762016-06-27 16:41:00 -05004670 }
4671
Kevin Barnetta60eec02016-08-31 14:54:11 -05004672 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4673 request->partial = chained;
Kevin Barnett6c223762016-06-27 16:41:00 -05004674 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
Kevin Barnetta60eec02016-08-31 14:54:11 -05004675
4676out:
Kevin Barnett6c223762016-06-27 16:41:00 -05004677 put_unaligned_le16(iu_length, &request->header.iu_length);
4678 request->num_sg_descriptors = num_sg_in_iu;
4679
4680 return 0;
4681}
4682
4683static void pqi_raid_io_complete(struct pqi_io_request *io_request,
4684 void *context)
4685{
4686 struct scsi_cmnd *scmd;
4687
4688 scmd = io_request->scmd;
4689 pqi_free_io_request(io_request);
4690 scsi_dma_unmap(scmd);
4691 pqi_scsi_done(scmd);
4692}
4693
Kevin Barnett376fb882017-05-03 18:54:43 -05004694static int pqi_raid_submit_scsi_cmd_with_io_request(
4695 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
Kevin Barnett6c223762016-06-27 16:41:00 -05004696 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4697 struct pqi_queue_group *queue_group)
4698{
4699 int rc;
4700 size_t cdb_length;
Kevin Barnett6c223762016-06-27 16:41:00 -05004701 struct pqi_raid_path_request *request;
4702
Kevin Barnett6c223762016-06-27 16:41:00 -05004703 io_request->io_complete_callback = pqi_raid_io_complete;
4704 io_request->scmd = scmd;
4705
Kevin Barnett6c223762016-06-27 16:41:00 -05004706 request = io_request->iu;
4707 memset(request, 0,
4708 offsetof(struct pqi_raid_path_request, sg_descriptors));
4709
4710 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
4711 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4712 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4713 put_unaligned_le16(io_request->index, &request->request_id);
4714 request->error_index = request->request_id;
4715 memcpy(request->lun_number, device->scsi3addr,
4716 sizeof(request->lun_number));
4717
4718 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
4719 memcpy(request->cdb, scmd->cmnd, cdb_length);
4720
4721 switch (cdb_length) {
4722 case 6:
4723 case 10:
4724 case 12:
4725 case 16:
4726 /* No bytes in the Additional CDB bytes field */
4727 request->additional_cdb_bytes_usage =
4728 SOP_ADDITIONAL_CDB_BYTES_0;
4729 break;
4730 case 20:
4731 /* 4 bytes in the Additional cdb field */
4732 request->additional_cdb_bytes_usage =
4733 SOP_ADDITIONAL_CDB_BYTES_4;
4734 break;
4735 case 24:
4736 /* 8 bytes in the Additional cdb field */
4737 request->additional_cdb_bytes_usage =
4738 SOP_ADDITIONAL_CDB_BYTES_8;
4739 break;
4740 case 28:
4741 /* 12 bytes in the Additional cdb field */
4742 request->additional_cdb_bytes_usage =
4743 SOP_ADDITIONAL_CDB_BYTES_12;
4744 break;
4745 case 32:
4746 default:
4747 /* 16 bytes in the Additional cdb field */
4748 request->additional_cdb_bytes_usage =
4749 SOP_ADDITIONAL_CDB_BYTES_16;
4750 break;
4751 }
4752
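 /*
 * The SOP data direction below is expressed from the controller's point
 * of view: a host write (DMA_TO_DEVICE) means the controller reads from
 * host memory, hence SOP_READ_FLAG, and a host read (DMA_FROM_DEVICE)
 * becomes SOP_WRITE_FLAG.
 */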
4753 switch (scmd->sc_data_direction) {
4754 case DMA_TO_DEVICE:
4755 request->data_direction = SOP_READ_FLAG;
4756 break;
4757 case DMA_FROM_DEVICE:
4758 request->data_direction = SOP_WRITE_FLAG;
4759 break;
4760 case DMA_NONE:
4761 request->data_direction = SOP_NO_DIRECTION_FLAG;
4762 break;
4763 case DMA_BIDIRECTIONAL:
4764 request->data_direction = SOP_BIDIRECTIONAL;
4765 break;
4766 default:
4767 dev_err(&ctrl_info->pci_dev->dev,
4768 "unknown data direction: %d\n",
4769 scmd->sc_data_direction);
Kevin Barnett6c223762016-06-27 16:41:00 -05004770 break;
4771 }
4772
4773 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
4774 if (rc) {
4775 pqi_free_io_request(io_request);
4776 return SCSI_MLQUEUE_HOST_BUSY;
4777 }
4778
4779 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
4780
4781 return 0;
4782}
4783
Kevin Barnett376fb882017-05-03 18:54:43 -05004784static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4785 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4786 struct pqi_queue_group *queue_group)
4787{
4788 struct pqi_io_request *io_request;
4789
4790 io_request = pqi_alloc_io_request(ctrl_info);
4791
4792 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
4793 device, scmd, queue_group);
4794}
4795
4796static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info)
4797{
4798 if (!pqi_ctrl_blocked(ctrl_info))
4799 schedule_work(&ctrl_info->raid_bypass_retry_work);
4800}
4801
4802static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
4803{
4804 struct scsi_cmnd *scmd;
Kevin Barnett03b288cf2017-05-03 18:54:49 -05004805 struct pqi_scsi_dev *device;
Kevin Barnett376fb882017-05-03 18:54:43 -05004806 struct pqi_ctrl_info *ctrl_info;
4807
4808 if (!io_request->raid_bypass)
4809 return false;
4810
4811 scmd = io_request->scmd;
4812 if ((scmd->result & 0xff) == SAM_STAT_GOOD)
4813 return false;
4814 if (host_byte(scmd->result) == DID_NO_CONNECT)
4815 return false;
4816
Kevin Barnett03b288cf2017-05-03 18:54:49 -05004817 device = scmd->device->hostdata;
4818 if (pqi_device_offline(device))
4819 return false;
4820
Kevin Barnett376fb882017-05-03 18:54:43 -05004821 ctrl_info = shost_to_hba(scmd->device->host);
4822 if (pqi_ctrl_offline(ctrl_info))
4823 return false;
4824
4825 return true;
4826}
4827
4828static inline void pqi_add_to_raid_bypass_retry_list(
4829 struct pqi_ctrl_info *ctrl_info,
4830 struct pqi_io_request *io_request, bool at_head)
4831{
4832 unsigned long flags;
4833
4834 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
4835 if (at_head)
4836 list_add(&io_request->request_list_entry,
4837 &ctrl_info->raid_bypass_retry_list);
4838 else
4839 list_add_tail(&io_request->request_list_entry,
4840 &ctrl_info->raid_bypass_retry_list);
4841 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
4842}
4843
4844static void pqi_queued_raid_bypass_complete(struct pqi_io_request *io_request,
4845 void *context)
4846{
4847 struct scsi_cmnd *scmd;
4848
4849 scmd = io_request->scmd;
4850 pqi_free_io_request(io_request);
4851 pqi_scsi_done(scmd);
4852}
4853
4854static void pqi_queue_raid_bypass_retry(struct pqi_io_request *io_request)
4855{
4856 struct scsi_cmnd *scmd;
4857 struct pqi_ctrl_info *ctrl_info;
4858
4859 io_request->io_complete_callback = pqi_queued_raid_bypass_complete;
4860 scmd = io_request->scmd;
4861 scmd->result = 0;
4862 ctrl_info = shost_to_hba(scmd->device->host);
4863
4864 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, false);
4865 pqi_schedule_bypass_retry(ctrl_info);
4866}
4867
4868static int pqi_retry_raid_bypass(struct pqi_io_request *io_request)
4869{
4870 struct scsi_cmnd *scmd;
4871 struct pqi_scsi_dev *device;
4872 struct pqi_ctrl_info *ctrl_info;
4873 struct pqi_queue_group *queue_group;
4874
4875 scmd = io_request->scmd;
4876 device = scmd->device->hostdata;
4877 if (pqi_device_in_reset(device)) {
4878 pqi_free_io_request(io_request);
4879 set_host_byte(scmd, DID_RESET);
4880 pqi_scsi_done(scmd);
4881 return 0;
4882 }
4883
4884 ctrl_info = shost_to_hba(scmd->device->host);
4885 queue_group = io_request->queue_group;
4886
4887 pqi_reinit_io_request(io_request);
4888
4889 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
4890 device, scmd, queue_group);
4891}
4892
4893static inline struct pqi_io_request *pqi_next_queued_raid_bypass_request(
4894 struct pqi_ctrl_info *ctrl_info)
4895{
4896 unsigned long flags;
4897 struct pqi_io_request *io_request;
4898
4899 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
4900 io_request = list_first_entry_or_null(
4901 &ctrl_info->raid_bypass_retry_list,
4902 struct pqi_io_request, request_list_entry);
4903 if (io_request)
4904 list_del(&io_request->request_list_entry);
4905 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
4906
4907 return io_request;
4908}
4909
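/*
 * Drain the bypass retry list, resubmitting each request on the RAID path.
 * Processing stops early if the controller starts blocking requests; if a
 * resubmit fails, the request goes back to the head of the list and the
 * retry work is rescheduled.
 */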
4910static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info)
4911{
4912 int rc;
4913 struct pqi_io_request *io_request;
4914
4915 pqi_ctrl_busy(ctrl_info);
4916
4917 while (1) {
4918 if (pqi_ctrl_blocked(ctrl_info))
4919 break;
4920 io_request = pqi_next_queued_raid_bypass_request(ctrl_info);
4921 if (!io_request)
4922 break;
4923 rc = pqi_retry_raid_bypass(io_request);
4924 if (rc) {
4925 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request,
4926 true);
4927 pqi_schedule_bypass_retry(ctrl_info);
4928 break;
4929 }
4930 }
4931
4932 pqi_ctrl_unbusy(ctrl_info);
4933}
4934
4935static void pqi_raid_bypass_retry_worker(struct work_struct *work)
4936{
4937 struct pqi_ctrl_info *ctrl_info;
4938
4939 ctrl_info = container_of(work, struct pqi_ctrl_info,
4940 raid_bypass_retry_work);
4941 pqi_retry_raid_bypass_requests(ctrl_info);
4942}
4943
Kevin Barnett5f310422017-05-03 18:54:55 -05004944static void pqi_clear_all_queued_raid_bypass_retries(
4945 struct pqi_ctrl_info *ctrl_info)
Kevin Barnett376fb882017-05-03 18:54:43 -05004946{
4947 unsigned long flags;
Kevin Barnett376fb882017-05-03 18:54:43 -05004948
4949 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
Kevin Barnett5f310422017-05-03 18:54:55 -05004950 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
Kevin Barnett376fb882017-05-03 18:54:43 -05004951 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
4952}
4953
Kevin Barnett6c223762016-06-27 16:41:00 -05004954static void pqi_aio_io_complete(struct pqi_io_request *io_request,
4955 void *context)
4956{
4957 struct scsi_cmnd *scmd;
4958
4959 scmd = io_request->scmd;
4960 scsi_dma_unmap(scmd);
4961 if (io_request->status == -EAGAIN)
4962 set_host_byte(scmd, DID_IMM_RETRY);
Kevin Barnett376fb882017-05-03 18:54:43 -05004963 else if (pqi_raid_bypass_retry_needed(io_request)) {
4964 pqi_queue_raid_bypass_retry(io_request);
4965 return;
4966 }
Kevin Barnett6c223762016-06-27 16:41:00 -05004967 pqi_free_io_request(io_request);
4968 pqi_scsi_done(scmd);
4969}
4970
4971static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4972 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4973 struct pqi_queue_group *queue_group)
4974{
4975 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
Kevin Barnett376fb882017-05-03 18:54:43 -05004976 scmd->cmnd, scmd->cmd_len, queue_group, NULL, false);
Kevin Barnett6c223762016-06-27 16:41:00 -05004977}
4978
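/*
 * Build an AIO path request (nexus handle, CDB, data direction, optional
 * encryption descriptor and SG list) and post it to the AIO queue of the
 * given queue group.
 */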
4979static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
4980 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
4981 unsigned int cdb_length, struct pqi_queue_group *queue_group,
Kevin Barnett376fb882017-05-03 18:54:43 -05004982 struct pqi_encryption_info *encryption_info, bool raid_bypass)
Kevin Barnett6c223762016-06-27 16:41:00 -05004983{
4984 int rc;
4985 struct pqi_io_request *io_request;
4986 struct pqi_aio_path_request *request;
4987
4988 io_request = pqi_alloc_io_request(ctrl_info);
4989 io_request->io_complete_callback = pqi_aio_io_complete;
4990 io_request->scmd = scmd;
Kevin Barnett376fb882017-05-03 18:54:43 -05004991 io_request->raid_bypass = raid_bypass;
Kevin Barnett6c223762016-06-27 16:41:00 -05004992
4993 request = io_request->iu;
4994 memset(request, 0,
4995 offsetof(struct pqi_raid_path_request, sg_descriptors));
4996
4997 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
4998 put_unaligned_le32(aio_handle, &request->nexus_id);
4999 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5000 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5001 put_unaligned_le16(io_request->index, &request->request_id);
5002 request->error_index = request->request_id;
5003 if (cdb_length > sizeof(request->cdb))
5004 cdb_length = sizeof(request->cdb);
5005 request->cdb_length = cdb_length;
5006 memcpy(request->cdb, cdb, cdb_length);
5007
5008 switch (scmd->sc_data_direction) {
5009 case DMA_TO_DEVICE:
5010 request->data_direction = SOP_READ_FLAG;
5011 break;
5012 case DMA_FROM_DEVICE:
5013 request->data_direction = SOP_WRITE_FLAG;
5014 break;
5015 case DMA_NONE:
5016 request->data_direction = SOP_NO_DIRECTION_FLAG;
5017 break;
5018 case DMA_BIDIRECTIONAL:
5019 request->data_direction = SOP_BIDIRECTIONAL;
5020 break;
5021 default:
5022 dev_err(&ctrl_info->pci_dev->dev,
5023 "unknown data direction: %d\n",
5024 scmd->sc_data_direction);
Kevin Barnett6c223762016-06-27 16:41:00 -05005025 break;
5026 }
5027
5028 if (encryption_info) {
5029 request->encryption_enable = true;
5030 put_unaligned_le16(encryption_info->data_encryption_key_index,
5031 &request->data_encryption_key_index);
5032 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5033 &request->encrypt_tweak_lower);
5034 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5035 &request->encrypt_tweak_upper);
5036 }
5037
5038 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
5039 if (rc) {
5040 pqi_free_io_request(io_request);
5041 return SCSI_MLQUEUE_HOST_BUSY;
5042 }
5043
5044 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5045
5046 return 0;
5047}
5048
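/*
 * Map the block layer's hardware queue tag to a controller queue group;
 * out-of-range values fall back to queue group 0.
 */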
Kevin Barnett061ef062017-05-03 18:53:05 -05005049static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
5050 struct scsi_cmnd *scmd)
5051{
5052 u16 hw_queue;
5053
5054 hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
5055 if (hw_queue > ctrl_info->max_hw_queue_index)
5056 hw_queue = 0;
5057
5058 return hw_queue;
5059}
5060
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005061/*
5062 * This function gets called just before we hand the completed SCSI request
5063 * back to the SCSI midlayer (SML).
5064 */
5065
5066void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
5067{
5068 struct pqi_scsi_dev *device;
5069
Mahesh Rajashekhara1e467312018-12-07 16:29:24 -06005070 if (!scmd->device) {
5071 set_host_byte(scmd, DID_NO_CONNECT);
5072 return;
5073 }
5074
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005075 device = scmd->device->hostdata;
Mahesh Rajashekhara1e467312018-12-07 16:29:24 -06005076 if (!device) {
5077 set_host_byte(scmd, DID_NO_CONNECT);
5078 return;
5079 }
5080
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005081 atomic_dec(&device->scsi_cmds_outstanding);
5082}
5083
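/*
 * .queuecommand entry point: fail the command immediately if the device is
 * gone or the controller is offline, return busy while the controller is
 * blocked or the device is being reset, and otherwise route logical-volume
 * I/O through the RAID bypass (AIO) path when enabled and everything else
 * through the RAID or AIO path as appropriate.
 */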
Kevin Barnett6c223762016-06-27 16:41:00 -05005084static int pqi_scsi_queue_command(struct Scsi_Host *shost,
Kevin Barnett7d81d2b2016-08-31 14:55:11 -05005085 struct scsi_cmnd *scmd)
Kevin Barnett6c223762016-06-27 16:41:00 -05005086{
5087 int rc;
5088 struct pqi_ctrl_info *ctrl_info;
5089 struct pqi_scsi_dev *device;
Kevin Barnett061ef062017-05-03 18:53:05 -05005090 u16 hw_queue;
Kevin Barnett6c223762016-06-27 16:41:00 -05005091 struct pqi_queue_group *queue_group;
5092 bool raid_bypassed;
5093
5094 device = scmd->device->hostdata;
Kevin Barnett6c223762016-06-27 16:41:00 -05005095 ctrl_info = shost_to_hba(shost);
5096
Mahesh Rajashekhara1e467312018-12-07 16:29:24 -06005097 if (!device) {
5098 set_host_byte(scmd, DID_NO_CONNECT);
5099 pqi_scsi_done(scmd);
5100 return 0;
5101 }
5102
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005103 atomic_inc(&device->scsi_cmds_outstanding);
5104
Mahesh Rajashekhara1e467312018-12-07 16:29:24 -06005105 if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(ctrl_info,
5106 device)) {
Kevin Barnett6c223762016-06-27 16:41:00 -05005107 set_host_byte(scmd, DID_NO_CONNECT);
5108 pqi_scsi_done(scmd);
5109 return 0;
5110 }
5111
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005112 pqi_ctrl_busy(ctrl_info);
5113 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device)) {
5114 rc = SCSI_MLQUEUE_HOST_BUSY;
5115 goto out;
5116 }
5117
Kevin Barnett7d81d2b2016-08-31 14:55:11 -05005118 /*
5119 * This is necessary because the SML doesn't zero out this field during
5120 * error recovery.
5121 */
5122 scmd->result = 0;
5123
Kevin Barnett061ef062017-05-03 18:53:05 -05005124 hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
5125 queue_group = &ctrl_info->queue_groups[hw_queue];
Kevin Barnett6c223762016-06-27 16:41:00 -05005126
5127 if (pqi_is_logical_device(device)) {
5128 raid_bypassed = false;
Kevin Barnett588a63fe2017-05-03 18:55:25 -05005129 if (device->raid_bypass_enabled &&
Christoph Hellwig57292b52017-01-31 16:57:29 +01005130 !blk_rq_is_passthrough(scmd->request)) {
Kevin Barnett6c223762016-06-27 16:41:00 -05005131 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
5132 scmd, queue_group);
Kevin Barnett376fb882017-05-03 18:54:43 -05005133 if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY)
5134 raid_bypassed = true;
Kevin Barnett6c223762016-06-27 16:41:00 -05005135 }
5136 if (!raid_bypassed)
5137 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
5138 queue_group);
5139 } else {
5140 if (device->aio_enabled)
5141 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
5142 queue_group);
5143 else
5144 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
5145 queue_group);
5146 }
5147
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005148out:
5149 pqi_ctrl_unbusy(ctrl_info);
5150 if (rc)
5151 atomic_dec(&device->scsi_cmds_outstanding);
5152
Kevin Barnett6c223762016-06-27 16:41:00 -05005153 return rc;
5154}
5155
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005156static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info,
5157 struct pqi_queue_group *queue_group)
5158{
5159 unsigned int path;
5160 unsigned long flags;
5161 bool list_is_empty;
5162
5163 for (path = 0; path < 2; path++) {
5164 while (1) {
5165 spin_lock_irqsave(
5166 &queue_group->submit_lock[path], flags);
5167 list_is_empty =
5168 list_empty(&queue_group->request_list[path]);
5169 spin_unlock_irqrestore(
5170 &queue_group->submit_lock[path], flags);
5171 if (list_is_empty)
5172 break;
5173 pqi_check_ctrl_health(ctrl_info);
5174 if (pqi_ctrl_offline(ctrl_info))
5175 return -ENXIO;
5176 usleep_range(1000, 2000);
5177 }
5178 }
5179
5180 return 0;
5181}
5182
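/*
 * Wait first for the driver's per-queue-group submit lists to drain, then
 * for the firmware to consume every posted inbound queue element (iq_ci
 * catching up to iq_pi) on both the RAID and AIO paths.
 */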
5183static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
5184{
5185 int rc;
5186 unsigned int i;
5187 unsigned int path;
5188 struct pqi_queue_group *queue_group;
5189 pqi_index_t iq_pi;
5190 pqi_index_t iq_ci;
5191
5192 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5193 queue_group = &ctrl_info->queue_groups[i];
5194
5195 rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group);
5196 if (rc)
5197 return rc;
5198
5199 for (path = 0; path < 2; path++) {
5200 iq_pi = queue_group->iq_pi_copy[path];
5201
5202 while (1) {
Kevin Barnettdac12fb2018-06-18 13:23:00 -05005203 iq_ci = readl(queue_group->iq_ci[path]);
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005204 if (iq_ci == iq_pi)
5205 break;
5206 pqi_check_ctrl_health(ctrl_info);
5207 if (pqi_ctrl_offline(ctrl_info))
5208 return -ENXIO;
5209 usleep_range(1000, 2000);
5210 }
5211 }
5212 }
5213
5214 return 0;
5215}
5216
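/*
 * Complete with DID_RESET any request for the given device that is still
 * sitting on a driver submit list, i.e. has not yet been posted to the
 * controller.
 */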
5217static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
5218 struct pqi_scsi_dev *device)
5219{
5220 unsigned int i;
5221 unsigned int path;
5222 struct pqi_queue_group *queue_group;
5223 unsigned long flags;
5224 struct pqi_io_request *io_request;
5225 struct pqi_io_request *next;
5226 struct scsi_cmnd *scmd;
5227 struct pqi_scsi_dev *scsi_device;
5228
5229 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5230 queue_group = &ctrl_info->queue_groups[i];
5231
5232 for (path = 0; path < 2; path++) {
5233 spin_lock_irqsave(
5234 &queue_group->submit_lock[path], flags);
5235
5236 list_for_each_entry_safe(io_request, next,
5237 &queue_group->request_list[path],
5238 request_list_entry) {
5239 scmd = io_request->scmd;
5240 if (!scmd)
5241 continue;
5242
5243 scsi_device = scmd->device->hostdata;
5244 if (scsi_device != device)
5245 continue;
5246
5247 list_del(&io_request->request_list_entry);
5248 set_host_byte(scmd, DID_RESET);
5249 pqi_scsi_done(scmd);
5250 }
5251
5252 spin_unlock_irqrestore(
5253 &queue_group->submit_lock[path], flags);
5254 }
5255 }
5256}
5257
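/*
 * Poll until the device's outstanding command count drops to zero, bailing
 * out if the controller goes offline or the (optional) timeout expires;
 * NO_TIMEOUT means wait indefinitely.
 */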
Kevin Barnett061ef062017-05-03 18:53:05 -05005258static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
Mahesh Rajashekhara1e467312018-12-07 16:29:24 -06005259 struct pqi_scsi_dev *device, unsigned long timeout_secs)
Kevin Barnett061ef062017-05-03 18:53:05 -05005260{
Mahesh Rajashekhara1e467312018-12-07 16:29:24 -06005261 unsigned long timeout;
5262
5263 timeout = (timeout_secs * HZ) + jiffies;
5264
Kevin Barnett061ef062017-05-03 18:53:05 -05005265 while (atomic_read(&device->scsi_cmds_outstanding)) {
5266 pqi_check_ctrl_health(ctrl_info);
5267 if (pqi_ctrl_offline(ctrl_info))
5268 return -ENXIO;
Mahesh Rajashekhara1e467312018-12-07 16:29:24 -06005269 if (timeout_secs != NO_TIMEOUT) {
5270 if (time_after(jiffies, timeout)) {
5271 dev_err(&ctrl_info->pci_dev->dev,
5272 "timed out waiting for pending IO\n");
5273 return -ETIMEDOUT;
5274 }
5275 }
Kevin Barnett061ef062017-05-03 18:53:05 -05005276 usleep_range(1000, 2000);
5277 }
5278
5279 return 0;
5280}
5281
5282static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info)
5283{
5284 bool io_pending;
5285 unsigned long flags;
5286 struct pqi_scsi_dev *device;
5287
5288 while (1) {
5289 io_pending = false;
5290
5291 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5292 list_for_each_entry(device, &ctrl_info->scsi_device_list,
5293 scsi_device_list_entry) {
5294 if (atomic_read(&device->scsi_cmds_outstanding)) {
5295 io_pending = true;
5296 break;
5297 }
5298 }
5299 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5300 flags);
5301
5302 if (!io_pending)
5303 break;
5304
5305 pqi_check_ctrl_health(ctrl_info);
5306 if (pqi_ctrl_offline(ctrl_info))
5307 return -ENXIO;
5308
5309 usleep_range(1000, 2000);
5310 }
5311
5312 return 0;
5313}
5314
Kevin Barnett14bb2152016-08-31 14:54:35 -05005315static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
Kevin Barnett6c223762016-06-27 16:41:00 -05005316 void *context)
5317{
5318 struct completion *waiting = context;
5319
5320 complete(waiting);
5321}
5322
Kevin Barnett14bb2152016-08-31 14:54:35 -05005323#define PQI_LUN_RESET_TIMEOUT_SECS 10
5324
5325static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
5326 struct pqi_scsi_dev *device, struct completion *wait)
5327{
5328 int rc;
Kevin Barnett14bb2152016-08-31 14:54:35 -05005329
5330 while (1) {
5331 if (wait_for_completion_io_timeout(wait,
5332 PQI_LUN_RESET_TIMEOUT_SECS * HZ)) {
5333 rc = 0;
5334 break;
5335 }
5336
5337 pqi_check_ctrl_health(ctrl_info);
5338 if (pqi_ctrl_offline(ctrl_info)) {
Kevin Barnett4e8415e2017-05-03 18:54:18 -05005339 rc = -ENXIO;
Kevin Barnett14bb2152016-08-31 14:54:35 -05005340 break;
5341 }
Kevin Barnett14bb2152016-08-31 14:54:35 -05005342 }
5343
5344 return rc;
5345}
5346
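/*
 * Send a SOP LUN RESET task management IU on the RAID path of the default
 * queue group and wait for its completion, checking controller health while
 * waiting.
 */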
5347static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
Kevin Barnett6c223762016-06-27 16:41:00 -05005348 struct pqi_scsi_dev *device)
5349{
5350 int rc;
5351 struct pqi_io_request *io_request;
5352 DECLARE_COMPLETION_ONSTACK(wait);
5353 struct pqi_task_management_request *request;
5354
Kevin Barnett6c223762016-06-27 16:41:00 -05005355 io_request = pqi_alloc_io_request(ctrl_info);
Kevin Barnett14bb2152016-08-31 14:54:35 -05005356 io_request->io_complete_callback = pqi_lun_reset_complete;
Kevin Barnett6c223762016-06-27 16:41:00 -05005357 io_request->context = &wait;
5358
5359 request = io_request->iu;
5360 memset(request, 0, sizeof(*request));
5361
5362 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
5363 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
5364 &request->header.iu_length);
5365 put_unaligned_le16(io_request->index, &request->request_id);
5366 memcpy(request->lun_number, device->scsi3addr,
5367 sizeof(request->lun_number));
5368 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
5369
5370 pqi_start_io(ctrl_info,
5371 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
5372 io_request);
5373
Kevin Barnett14bb2152016-08-31 14:54:35 -05005374 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
5375 if (rc == 0)
Kevin Barnett6c223762016-06-27 16:41:00 -05005376 rc = io_request->status;
Kevin Barnett6c223762016-06-27 16:41:00 -05005377
5378 pqi_free_io_request(io_request);
Kevin Barnett6c223762016-06-27 16:41:00 -05005379
5380 return rc;
5381}
5382
Mahesh Rajashekhara34063842018-12-07 16:28:16 -06005383#define PQI_LUN_RESET_RETRIES 3
5384#define PQI_LUN_RESET_RETRY_INTERVAL_MSECS 10000
Kevin Barnett6c223762016-06-27 16:41:00 -05005385/* Performs a reset at the LUN level. */
5386
5387static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
5388 struct pqi_scsi_dev *device)
5389{
5390 int rc;
Mahesh Rajashekhara34063842018-12-07 16:28:16 -06005391 unsigned int retries;
Kevin Barnett6c223762016-06-27 16:41:00 -05005392
Mahesh Rajashekhara34063842018-12-07 16:28:16 -06005393 for (retries = 0;;) {
5394 rc = pqi_lun_reset(ctrl_info, device);
5395 if (rc != -EAGAIN ||
5396 ++retries > PQI_LUN_RESET_RETRIES)
5397 break;
5398 msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
5399 }
Kevin Barnett061ef062017-05-03 18:53:05 -05005400 if (rc == 0)
Mahesh Rajashekhara1e467312018-12-07 16:29:24 -06005401 rc = pqi_device_wait_for_pending_io(ctrl_info,
5402 device, NO_TIMEOUT);
Kevin Barnett6c223762016-06-27 16:41:00 -05005403
Kevin Barnett14bb2152016-08-31 14:54:35 -05005404 return rc == 0 ? SUCCESS : FAILED;
Kevin Barnett6c223762016-06-27 16:41:00 -05005405}
5406
5407static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
5408{
5409 int rc;
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005410 struct Scsi_Host *shost;
Kevin Barnett6c223762016-06-27 16:41:00 -05005411 struct pqi_ctrl_info *ctrl_info;
5412 struct pqi_scsi_dev *device;
5413
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005414 shost = scmd->device->host;
5415 ctrl_info = shost_to_hba(shost);
Kevin Barnett6c223762016-06-27 16:41:00 -05005416 device = scmd->device->hostdata;
5417
5418 dev_err(&ctrl_info->pci_dev->dev,
5419 "resetting scsi %d:%d:%d:%d\n",
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005420 shost->host_no, device->bus, device->target, device->lun);
Kevin Barnett6c223762016-06-27 16:41:00 -05005421
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005422 pqi_check_ctrl_health(ctrl_info);
5423 if (pqi_ctrl_offline(ctrl_info)) {
5424 rc = FAILED;
5425 goto out;
5426 }
Kevin Barnett6c223762016-06-27 16:41:00 -05005427
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005428 mutex_lock(&ctrl_info->lun_reset_mutex);
5429
5430 pqi_ctrl_block_requests(ctrl_info);
5431 pqi_ctrl_wait_until_quiesced(ctrl_info);
5432 pqi_fail_io_queued_for_device(ctrl_info, device);
5433 rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
5434 pqi_device_reset_start(device);
5435 pqi_ctrl_unblock_requests(ctrl_info);
5436
5437 if (rc)
5438 rc = FAILED;
5439 else
5440 rc = pqi_device_reset(ctrl_info, device);
5441
5442 pqi_device_reset_done(device);
5443
5444 mutex_unlock(&ctrl_info->lun_reset_mutex);
5445
5446out:
Kevin Barnett6c223762016-06-27 16:41:00 -05005447 dev_err(&ctrl_info->pci_dev->dev,
5448 "reset of scsi %d:%d:%d:%d: %s\n",
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005449 shost->host_no, device->bus, device->target, device->lun,
Kevin Barnett6c223762016-06-27 16:41:00 -05005450 rc == SUCCESS ? "SUCCESS" : "FAILED");
5451
5452 return rc;
5453}
5454
5455static int pqi_slave_alloc(struct scsi_device *sdev)
5456{
5457 struct pqi_scsi_dev *device;
5458 unsigned long flags;
5459 struct pqi_ctrl_info *ctrl_info;
5460 struct scsi_target *starget;
5461 struct sas_rphy *rphy;
5462
5463 ctrl_info = shost_to_hba(sdev->host);
5464
5465 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5466
5467 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
5468 starget = scsi_target(sdev);
5469 rphy = target_to_rphy(starget);
5470 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
5471 if (device) {
5472 device->target = sdev_id(sdev);
5473 device->lun = sdev->lun;
5474 device->target_lun_valid = true;
5475 }
5476 } else {
5477 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
5478 sdev_id(sdev), sdev->lun);
5479 }
5480
Kevin Barnett94086f52017-05-03 18:54:31 -05005481 if (device) {
Kevin Barnett6c223762016-06-27 16:41:00 -05005482 sdev->hostdata = device;
5483 device->sdev = sdev;
5484 if (device->queue_depth) {
5485 device->advertised_queue_depth = device->queue_depth;
5486 scsi_change_queue_depth(sdev,
5487 device->advertised_queue_depth);
5488 }
Dave Carrollb6e2ef62018-12-07 16:28:23 -06005489 if (pqi_is_logical_device(device))
5490 pqi_disable_write_same(sdev);
Dave Carroll2b447f82018-12-07 16:29:05 -06005491 else
5492 sdev->allow_restart = 1;
Kevin Barnett6c223762016-06-27 16:41:00 -05005493 }
5494
5495 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5496
5497 return 0;
5498}
5499
Christoph Hellwig52198222016-11-01 08:12:49 -06005500static int pqi_map_queues(struct Scsi_Host *shost)
5501{
5502 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
5503
Keith Buschf23f5bec2018-03-27 09:39:06 -06005504 return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev, 0);
Christoph Hellwig52198222016-11-01 08:12:49 -06005505}
5506
Kevin Barnett6c223762016-06-27 16:41:00 -05005507static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
5508 void __user *arg)
5509{
5510 struct pci_dev *pci_dev;
5511 u32 subsystem_vendor;
5512 u32 subsystem_device;
5513 cciss_pci_info_struct pciinfo;
5514
5515 if (!arg)
5516 return -EINVAL;
5517
5518 pci_dev = ctrl_info->pci_dev;
5519
5520 pciinfo.domain = pci_domain_nr(pci_dev->bus);
5521 pciinfo.bus = pci_dev->bus->number;
5522 pciinfo.dev_fn = pci_dev->devfn;
5523 subsystem_vendor = pci_dev->subsystem_vendor;
5524 subsystem_device = pci_dev->subsystem_device;
5525 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
5526 subsystem_vendor;
5527
5528 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
5529 return -EFAULT;
5530
5531 return 0;
5532}
5533
5534static int pqi_getdrivver_ioctl(void __user *arg)
5535{
5536 u32 version;
5537
5538 if (!arg)
5539 return -EINVAL;
5540
5541 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
5542 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
5543
5544 if (copy_to_user(arg, &version, sizeof(version)))
5545 return -EFAULT;
5546
5547 return 0;
5548}
5549
5550struct ciss_error_info {
5551 u8 scsi_status;
5552 int command_status;
5553 size_t sense_data_length;
5554};
5555
5556static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
5557 struct ciss_error_info *ciss_error_info)
5558{
5559 int ciss_cmd_status;
5560 size_t sense_data_length;
5561
5562 switch (pqi_error_info->data_out_result) {
5563 case PQI_DATA_IN_OUT_GOOD:
5564 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
5565 break;
5566 case PQI_DATA_IN_OUT_UNDERFLOW:
5567 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
5568 break;
5569 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
5570 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
5571 break;
5572 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
5573 case PQI_DATA_IN_OUT_BUFFER_ERROR:
5574 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
5575 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
5576 case PQI_DATA_IN_OUT_ERROR:
5577 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
5578 break;
5579 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
5580 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
5581 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
5582 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
5583 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
5584 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
5585 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
5586 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
5587 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
5588 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
5589 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
5590 break;
5591 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
5592 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
5593 break;
5594 case PQI_DATA_IN_OUT_ABORTED:
5595 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
5596 break;
5597 case PQI_DATA_IN_OUT_TIMEOUT:
5598 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
5599 break;
5600 default:
5601 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
5602 break;
5603 }
5604
5605 sense_data_length =
5606 get_unaligned_le16(&pqi_error_info->sense_data_length);
5607 if (sense_data_length == 0)
5608 sense_data_length =
5609 get_unaligned_le16(&pqi_error_info->response_data_length);
5610 if (sense_data_length)
5611 if (sense_data_length > sizeof(pqi_error_info->data))
5612 sense_data_length = sizeof(pqi_error_info->data);
5613
5614 ciss_error_info->scsi_status = pqi_error_info->status;
5615 ciss_error_info->command_status = ciss_cmd_status;
5616 ciss_error_info->sense_data_length = sense_data_length;
5617}
5618
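/*
 * CCISS_PASSTHRU handler: validate the user request, copy in any data
 * buffer, build a synchronous RAID path request, map the buffer for DMA,
 * and copy status, sense data, and any read data back to user space.
 */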
5619static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
5620{
5621 int rc;
5622 char *kernel_buffer = NULL;
5623 u16 iu_length;
5624 size_t sense_data_length;
5625 IOCTL_Command_struct iocommand;
5626 struct pqi_raid_path_request request;
5627 struct pqi_raid_error_info pqi_error_info;
5628 struct ciss_error_info ciss_error_info;
5629
5630 if (pqi_ctrl_offline(ctrl_info))
5631 return -ENXIO;
5632 if (!arg)
5633 return -EINVAL;
5634 if (!capable(CAP_SYS_RAWIO))
5635 return -EPERM;
5636 if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
5637 return -EFAULT;
5638 if (iocommand.buf_size < 1 &&
5639 iocommand.Request.Type.Direction != XFER_NONE)
5640 return -EINVAL;
5641 if (iocommand.Request.CDBLen > sizeof(request.cdb))
5642 return -EINVAL;
5643 if (iocommand.Request.Type.Type != TYPE_CMD)
5644 return -EINVAL;
5645
5646 switch (iocommand.Request.Type.Direction) {
5647 case XFER_NONE:
5648 case XFER_WRITE:
5649 case XFER_READ:
Kevin Barnett41555d52017-08-10 13:46:51 -05005650 case XFER_READ | XFER_WRITE:
Kevin Barnett6c223762016-06-27 16:41:00 -05005651 break;
5652 default:
5653 return -EINVAL;
5654 }
5655
5656 if (iocommand.buf_size > 0) {
5657 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
5658 if (!kernel_buffer)
5659 return -ENOMEM;
5660 if (iocommand.Request.Type.Direction & XFER_WRITE) {
5661 if (copy_from_user(kernel_buffer, iocommand.buf,
5662 iocommand.buf_size)) {
5663 rc = -EFAULT;
5664 goto out;
5665 }
5666 } else {
5667 memset(kernel_buffer, 0, iocommand.buf_size);
5668 }
5669 }
5670
5671 memset(&request, 0, sizeof(request));
5672
5673 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
5674 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
5675 PQI_REQUEST_HEADER_LENGTH;
5676 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
5677 sizeof(request.lun_number));
5678 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
5679 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
5680
5681 switch (iocommand.Request.Type.Direction) {
5682 case XFER_NONE:
5683 request.data_direction = SOP_NO_DIRECTION_FLAG;
5684 break;
5685 case XFER_WRITE:
5686 request.data_direction = SOP_WRITE_FLAG;
5687 break;
5688 case XFER_READ:
5689 request.data_direction = SOP_READ_FLAG;
5690 break;
Kevin Barnett41555d52017-08-10 13:46:51 -05005691 case XFER_READ | XFER_WRITE:
5692 request.data_direction = SOP_BIDIRECTIONAL;
5693 break;
Kevin Barnett6c223762016-06-27 16:41:00 -05005694 }
5695
5696 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5697
5698 if (iocommand.buf_size > 0) {
5699 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
5700
5701 rc = pqi_map_single(ctrl_info->pci_dev,
5702 &request.sg_descriptors[0], kernel_buffer,
Christoph Hellwig6917a9c2018-10-11 09:47:59 +02005703 iocommand.buf_size, DMA_BIDIRECTIONAL);
Kevin Barnett6c223762016-06-27 16:41:00 -05005704 if (rc)
5705 goto out;
5706
5707 iu_length += sizeof(request.sg_descriptors[0]);
5708 }
5709
5710 put_unaligned_le16(iu_length, &request.header.iu_length);
5711
5712 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
5713 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
5714
5715 if (iocommand.buf_size > 0)
5716 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
Christoph Hellwig6917a9c2018-10-11 09:47:59 +02005717 DMA_BIDIRECTIONAL);
Kevin Barnett6c223762016-06-27 16:41:00 -05005718
5719 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
5720
5721 if (rc == 0) {
5722 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
5723 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
5724 iocommand.error_info.CommandStatus =
5725 ciss_error_info.command_status;
5726 sense_data_length = ciss_error_info.sense_data_length;
5727 if (sense_data_length) {
5728 if (sense_data_length >
5729 sizeof(iocommand.error_info.SenseInfo))
5730 sense_data_length =
5731 sizeof(iocommand.error_info.SenseInfo);
5732 memcpy(iocommand.error_info.SenseInfo,
5733 pqi_error_info.data, sense_data_length);
5734 iocommand.error_info.SenseLen = sense_data_length;
5735 }
5736 }
5737
5738 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
5739 rc = -EFAULT;
5740 goto out;
5741 }
5742
5743 if (rc == 0 && iocommand.buf_size > 0 &&
5744 (iocommand.Request.Type.Direction & XFER_READ)) {
5745 if (copy_to_user(iocommand.buf, kernel_buffer,
5746 iocommand.buf_size)) {
5747 rc = -EFAULT;
5748 }
5749 }
5750
5751out:
5752 kfree(kernel_buffer);
5753
5754 return rc;
5755}
5756
5757static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
5758{
5759 int rc;
5760 struct pqi_ctrl_info *ctrl_info;
5761
5762 ctrl_info = shost_to_hba(sdev->host);
5763
5764 switch (cmd) {
5765 case CCISS_DEREGDISK:
5766 case CCISS_REGNEWDISK:
5767 case CCISS_REGNEWD:
5768 rc = pqi_scan_scsi_devices(ctrl_info);
5769 break;
5770 case CCISS_GETPCIINFO:
5771 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
5772 break;
5773 case CCISS_GETDRIVVER:
5774 rc = pqi_getdrivver_ioctl(arg);
5775 break;
5776 case CCISS_PASSTHRU:
5777 rc = pqi_passthru_ioctl(ctrl_info, arg);
5778 break;
5779 default:
5780 rc = -EINVAL;
5781 break;
5782 }
5783
5784 return rc;
5785}
5786
5787static ssize_t pqi_version_show(struct device *dev,
5788 struct device_attribute *attr, char *buffer)
5789{
5790 ssize_t count = 0;
5791 struct Scsi_Host *shost;
5792 struct pqi_ctrl_info *ctrl_info;
5793
5794 shost = class_to_shost(dev);
5795 ctrl_info = shost_to_hba(shost);
5796
5797 count += snprintf(buffer + count, PAGE_SIZE - count,
5798 " driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP);
5799
5800 count += snprintf(buffer + count, PAGE_SIZE - count,
5801 "firmware: %s\n", ctrl_info->firmware_version);
5802
5803 return count;
5804}
5805
5806static ssize_t pqi_host_rescan_store(struct device *dev,
5807 struct device_attribute *attr, const char *buffer, size_t count)
5808{
5809 struct Scsi_Host *shost = class_to_shost(dev);
5810
5811 pqi_scan_start(shost);
5812
5813 return count;
5814}
5815
Kevin Barnett3c509762017-05-03 18:54:37 -05005816static ssize_t pqi_lockup_action_show(struct device *dev,
5817 struct device_attribute *attr, char *buffer)
5818{
5819 int count = 0;
5820 unsigned int i;
5821
5822 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
5823 if (pqi_lockup_actions[i].action == pqi_lockup_action)
5824 count += snprintf(buffer + count, PAGE_SIZE - count,
5825 "[%s] ", pqi_lockup_actions[i].name);
5826 else
5827 count += snprintf(buffer + count, PAGE_SIZE - count,
5828 "%s ", pqi_lockup_actions[i].name);
5829 }
5830
5831 count += snprintf(buffer + count, PAGE_SIZE - count, "\n");
5832
5833 return count;
5834}
5835
5836static ssize_t pqi_lockup_action_store(struct device *dev,
5837 struct device_attribute *attr, const char *buffer, size_t count)
5838{
5839 unsigned int i;
5840 char *action_name;
5841 char action_name_buffer[32];
5842
5843 strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer));
5844 action_name = strstrip(action_name_buffer);
5845
5846 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
5847 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) {
5848 pqi_lockup_action = pqi_lockup_actions[i].action;
5849 return count;
5850 }
5851 }
5852
5853 return -EINVAL;
5854}
5855
Kevin Barnettcbe0c7b2017-05-03 18:53:48 -05005856static DEVICE_ATTR(version, 0444, pqi_version_show, NULL);
5857static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
Kevin Barnett3c509762017-05-03 18:54:37 -05005858static DEVICE_ATTR(lockup_action, 0644,
5859 pqi_lockup_action_show, pqi_lockup_action_store);
Kevin Barnett6c223762016-06-27 16:41:00 -05005860
5861static struct device_attribute *pqi_shost_attrs[] = {
5862 &dev_attr_version,
5863 &dev_attr_rescan,
Kevin Barnett3c509762017-05-03 18:54:37 -05005864 &dev_attr_lockup_action,
Kevin Barnett6c223762016-06-27 16:41:00 -05005865 NULL
5866};
5867
Dave Carrollcd128242018-12-07 16:28:47 -06005868static ssize_t pqi_unique_id_show(struct device *dev,
5869 struct device_attribute *attr, char *buffer)
5870{
5871 struct pqi_ctrl_info *ctrl_info;
5872 struct scsi_device *sdev;
5873 struct pqi_scsi_dev *device;
5874 unsigned long flags;
5875 unsigned char uid[16];
5876
5877 sdev = to_scsi_device(dev);
5878 ctrl_info = shost_to_hba(sdev->host);
5879
5880 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5881
5882 device = sdev->hostdata;
5883 if (!device) {
5884 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5885 flags);
5886 return -ENODEV;
5887 }
5888 memcpy(uid, device->unique_id, sizeof(uid));
5889
5890 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5891
Murthy Bhat5995b232018-12-07 16:28:59 -06005892 return snprintf(buffer, PAGE_SIZE,
5893 "%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n",
5894 uid[0], uid[1], uid[2], uid[3],
5895 uid[4], uid[5], uid[6], uid[7],
5896 uid[8], uid[9], uid[10], uid[11],
5897 uid[12], uid[13], uid[14], uid[15]);
Dave Carrollcd128242018-12-07 16:28:47 -06005898}
5899
5900static ssize_t pqi_lunid_show(struct device *dev,
5901 struct device_attribute *attr, char *buffer)
5902{
5903 struct pqi_ctrl_info *ctrl_info;
5904 struct scsi_device *sdev;
5905 struct pqi_scsi_dev *device;
5906 unsigned long flags;
5907 u8 lunid[8];
5908
5909 sdev = to_scsi_device(dev);
5910 ctrl_info = shost_to_hba(sdev->host);
5911
5912 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5913
5914 device = sdev->hostdata;
5915 if (!device) {
5916 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5917 flags);
5918 return -ENODEV;
5919 }
5920 memcpy(lunid, device->scsi3addr, sizeof(lunid));
5921
5922 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5923
5924 return snprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
5925}
5926
5927#define MAX_PATHS 8
5928static ssize_t pqi_path_info_show(struct device *dev,
5929 struct device_attribute *attr, char *buf)
5930{
5931 struct pqi_ctrl_info *ctrl_info;
5932 struct scsi_device *sdev;
5933 struct pqi_scsi_dev *device;
5934 unsigned long flags;
5935 int i;
5936 int output_len = 0;
5937 u8 box;
5938 u8 bay;
5939 u8 path_map_index = 0;
5940 char *active;
5941 unsigned char phys_connector[2];
5942
5943 sdev = to_scsi_device(dev);
5944 ctrl_info = shost_to_hba(sdev->host);
5945
5946 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5947
5948 device = sdev->hostdata;
5949 if (!device) {
5950 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5951 flags);
5952 return -ENODEV;
5953 }
5954
5955 bay = device->bay;
5956 for (i = 0; i < MAX_PATHS; i++) {
5957 path_map_index = 1<<i;
5958 if (i == device->active_path_index)
5959 active = "Active";
5960 else if (device->path_map & path_map_index)
5961 active = "Inactive";
5962 else
5963 continue;
5964
5965 output_len += scnprintf(buf + output_len,
5966 PAGE_SIZE - output_len,
5967 "[%d:%d:%d:%d] %20.20s ",
5968 ctrl_info->scsi_host->host_no,
5969 device->bus, device->target,
5970 device->lun,
5971 scsi_device_type(device->devtype));
5972
5973 if (device->devtype == TYPE_RAID ||
5974 pqi_is_logical_device(device))
5975 goto end_buffer;
5976
5977 memcpy(&phys_connector, &device->phys_connector[i],
5978 sizeof(phys_connector));
5979 if (phys_connector[0] < '0')
5980 phys_connector[0] = '0';
5981 if (phys_connector[1] < '0')
5982 phys_connector[1] = '0';
5983
5984 output_len += scnprintf(buf + output_len,
5985 PAGE_SIZE - output_len,
5986 "PORT: %.2s ", phys_connector);
5987
5988 box = device->box[i];
5989 if (box != 0 && box != 0xFF)
5990 output_len += scnprintf(buf + output_len,
5991 PAGE_SIZE - output_len,
5992 "BOX: %hhu ", box);
5993
5994 if ((device->devtype == TYPE_DISK ||
5995 device->devtype == TYPE_ZBC) &&
5996 pqi_expose_device(device))
5997 output_len += scnprintf(buf + output_len,
5998 PAGE_SIZE - output_len,
5999 "BAY: %hhu ", bay);
6000
6001end_buffer:
6002 output_len += scnprintf(buf + output_len,
6003 PAGE_SIZE - output_len,
6004 "%s\n", active);
6005 }
6006
6007 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6008 return output_len;
6009}
6010
6011
Kevin Barnett6c223762016-06-27 16:41:00 -05006012static ssize_t pqi_sas_address_show(struct device *dev,
6013 struct device_attribute *attr, char *buffer)
6014{
6015 struct pqi_ctrl_info *ctrl_info;
6016 struct scsi_device *sdev;
6017 struct pqi_scsi_dev *device;
6018 unsigned long flags;
6019 u64 sas_address;
6020
6021 sdev = to_scsi_device(dev);
6022 ctrl_info = shost_to_hba(sdev->host);
6023
6024 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6025
6026 device = sdev->hostdata;
6027 if (pqi_is_logical_device(device)) {
6028 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
6029 flags);
6030 return -ENODEV;
6031 }
6032 sas_address = device->sas_address;
6033
6034 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6035
6036 return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
6037}
6038
6039static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
6040 struct device_attribute *attr, char *buffer)
6041{
6042 struct pqi_ctrl_info *ctrl_info;
6043 struct scsi_device *sdev;
6044 struct pqi_scsi_dev *device;
6045 unsigned long flags;
6046
6047 sdev = to_scsi_device(dev);
6048 ctrl_info = shost_to_hba(sdev->host);
6049
6050 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6051
6052 device = sdev->hostdata;
Kevin Barnett588a63fe2017-05-03 18:55:25 -05006053 buffer[0] = device->raid_bypass_enabled ? '1' : '0';
Kevin Barnett6c223762016-06-27 16:41:00 -05006054 buffer[1] = '\n';
6055 buffer[2] = '\0';
6056
6057 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6058
6059 return 2;
6060}
6061
Kevin Barnetta9f93392017-05-03 18:55:31 -05006062static ssize_t pqi_raid_level_show(struct device *dev,
6063 struct device_attribute *attr, char *buffer)
6064{
6065 struct pqi_ctrl_info *ctrl_info;
6066 struct scsi_device *sdev;
6067 struct pqi_scsi_dev *device;
6068 unsigned long flags;
6069 char *raid_level;
6070
6071 sdev = to_scsi_device(dev);
6072 ctrl_info = shost_to_hba(sdev->host);
6073
6074 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6075
6076 device = sdev->hostdata;
6077
6078 if (pqi_is_logical_device(device))
6079 raid_level = pqi_raid_level_to_string(device->raid_level);
6080 else
6081 raid_level = "N/A";
6082
6083 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6084
6085 return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
6086}
6087
Dave Carrollcd128242018-12-07 16:28:47 -06006088static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
6089static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
6090static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
Kevin Barnettcbe0c7b2017-05-03 18:53:48 -05006091static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
6092static DEVICE_ATTR(ssd_smart_path_enabled, 0444,
Kevin Barnett6c223762016-06-27 16:41:00 -05006093 pqi_ssd_smart_path_enabled_show, NULL);
Kevin Barnetta9f93392017-05-03 18:55:31 -05006094static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
Kevin Barnett6c223762016-06-27 16:41:00 -05006095
6096static struct device_attribute *pqi_sdev_attrs[] = {
Dave Carrollcd128242018-12-07 16:28:47 -06006097 &dev_attr_lunid,
6098 &dev_attr_unique_id,
6099 &dev_attr_path_info,
Kevin Barnett6c223762016-06-27 16:41:00 -05006100 &dev_attr_sas_address,
6101 &dev_attr_ssd_smart_path_enabled,
Kevin Barnetta9f93392017-05-03 18:55:31 -05006102 &dev_attr_raid_level,
Kevin Barnett6c223762016-06-27 16:41:00 -05006103 NULL
6104};
6105
6106static struct scsi_host_template pqi_driver_template = {
6107 .module = THIS_MODULE,
6108 .name = DRIVER_NAME_SHORT,
6109 .proc_name = DRIVER_NAME_SHORT,
6110 .queuecommand = pqi_scsi_queue_command,
6111 .scan_start = pqi_scan_start,
6112 .scan_finished = pqi_scan_finished,
6113 .this_id = -1,
Kevin Barnett6c223762016-06-27 16:41:00 -05006114 .eh_device_reset_handler = pqi_eh_device_reset_handler,
6115 .ioctl = pqi_ioctl,
6116 .slave_alloc = pqi_slave_alloc,
Christoph Hellwig52198222016-11-01 08:12:49 -06006117 .map_queues = pqi_map_queues,
Kevin Barnett6c223762016-06-27 16:41:00 -05006118 .sdev_attrs = pqi_sdev_attrs,
6119 .shost_attrs = pqi_shost_attrs,
6120};
6121
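/*
 * Allocate the Scsi_Host, size its limits from the controller capabilities,
 * register it with the SCSI midlayer, and attach the SAS transport host.
 */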
6122static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
6123{
6124 int rc;
6125 struct Scsi_Host *shost;
6126
6127 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
6128 if (!shost) {
6129 dev_err(&ctrl_info->pci_dev->dev,
6130 "scsi_host_alloc failed for controller %u\n",
6131 ctrl_info->ctrl_id);
6132 return -ENOMEM;
6133 }
6134
6135 shost->io_port = 0;
6136 shost->n_io_port = 0;
6137 shost->this_id = -1;
6138 shost->max_channel = PQI_MAX_BUS;
6139 shost->max_cmd_len = MAX_COMMAND_SIZE;
6140 shost->max_lun = ~0;
6141 shost->max_id = ~0;
6142 shost->max_sectors = ctrl_info->max_sectors;
6143 shost->can_queue = ctrl_info->scsi_ml_can_queue;
6144 shost->cmd_per_lun = shost->can_queue;
6145 shost->sg_tablesize = ctrl_info->sg_tablesize;
6146 shost->transportt = pqi_sas_transport_template;
Christoph Hellwig52198222016-11-01 08:12:49 -06006147 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
Kevin Barnett6c223762016-06-27 16:41:00 -05006148 shost->unique_id = shost->irq;
6149 shost->nr_hw_queues = ctrl_info->num_queue_groups;
6150 shost->hostdata[0] = (unsigned long)ctrl_info;
6151
6152 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
6153 if (rc) {
6154 dev_err(&ctrl_info->pci_dev->dev,
6155 "scsi_add_host failed for controller %u\n",
6156 ctrl_info->ctrl_id);
6157 goto free_host;
6158 }
6159
6160 rc = pqi_add_sas_host(shost, ctrl_info);
6161 if (rc) {
6162 dev_err(&ctrl_info->pci_dev->dev,
6163 "add SAS host failed for controller %u\n",
6164 ctrl_info->ctrl_id);
6165 goto remove_host;
6166 }
6167
6168 ctrl_info->scsi_host = shost;
6169
6170 return 0;
6171
6172remove_host:
6173 scsi_remove_host(shost);
6174free_host:
6175 scsi_host_put(shost);
6176
6177 return rc;
6178}
6179
6180static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
6181{
6182 struct Scsi_Host *shost;
6183
6184 pqi_delete_sas_host(ctrl_info);
6185
6186 shost = ctrl_info->scsi_host;
6187 if (!shost)
6188 return;
6189
6190 scsi_remove_host(shost);
6191 scsi_host_put(shost);
6192}
6193
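/*
 * Poll the PQI device reset register until the reset action reports
 * completion. The timeout comes from the controller's max_reset_timeout
 * register, which is scaled by 100 here, i.e. treated as 100 ms units.
 */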
Kevin Barnett336b6812017-08-10 13:46:39 -05006194static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
6195{
6196 int rc = 0;
6197 struct pqi_device_registers __iomem *pqi_registers;
6198 unsigned long timeout;
6199 unsigned int timeout_msecs;
6200 union pqi_reset_register reset_reg;
Kevin Barnett6c223762016-06-27 16:41:00 -05006201
Kevin Barnett336b6812017-08-10 13:46:39 -05006202 pqi_registers = ctrl_info->pqi_registers;
6203 timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100;
6204 timeout = msecs_to_jiffies(timeout_msecs) + jiffies;
6205
6206 while (1) {
6207 msleep(PQI_RESET_POLL_INTERVAL_MSECS);
6208 reset_reg.all_bits = readl(&pqi_registers->device_reset);
6209 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
6210 break;
6211 pqi_check_ctrl_health(ctrl_info);
6212 if (pqi_ctrl_offline(ctrl_info)) {
6213 rc = -ENXIO;
6214 break;
6215 }
6216 if (time_after(jiffies, timeout)) {
6217 rc = -ETIMEDOUT;
6218 break;
6219 }
6220 }
6221
6222 return rc;
6223}
Kevin Barnett6c223762016-06-27 16:41:00 -05006224
6225static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
6226{
6227 int rc;
Kevin Barnett336b6812017-08-10 13:46:39 -05006228 union pqi_reset_register reset_reg;
Kevin Barnett6c223762016-06-27 16:41:00 -05006229
Kevin Barnett336b6812017-08-10 13:46:39 -05006230 if (ctrl_info->pqi_reset_quiesce_supported) {
6231 rc = sis_pqi_reset_quiesce(ctrl_info);
6232 if (rc) {
6233 dev_err(&ctrl_info->pci_dev->dev,
6234 "PQI reset failed during quiesce with error %d\n",
6235 rc);
6236 return rc;
6237 }
6238 }
Kevin Barnett6c223762016-06-27 16:41:00 -05006239
Kevin Barnett336b6812017-08-10 13:46:39 -05006240 reset_reg.all_bits = 0;
6241 reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
6242 reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
Kevin Barnett6c223762016-06-27 16:41:00 -05006243
Kevin Barnett336b6812017-08-10 13:46:39 -05006244 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset);
6245
6246 rc = pqi_wait_for_pqi_reset_completion(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05006247 if (rc)
6248 dev_err(&ctrl_info->pci_dev->dev,
Kevin Barnett336b6812017-08-10 13:46:39 -05006249 "PQI reset failed with error %d\n", rc);
Kevin Barnett6c223762016-06-27 16:41:00 -05006250
6251 return rc;
6252}
6253
6254static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
6255{
6256 int rc;
6257 struct bmic_identify_controller *identify;
6258
6259 identify = kmalloc(sizeof(*identify), GFP_KERNEL);
6260 if (!identify)
6261 return -ENOMEM;
6262
6263 rc = pqi_identify_controller(ctrl_info, identify);
6264 if (rc)
6265 goto out;
6266
6267 memcpy(ctrl_info->firmware_version, identify->firmware_version,
6268 sizeof(identify->firmware_version));
6269 ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
6270 snprintf(ctrl_info->firmware_version +
6271 strlen(ctrl_info->firmware_version),
6272 sizeof(ctrl_info->firmware_version),
6273 "-%u", get_unaligned_le16(&identify->firmware_build_number));
6274
6275out:
6276 kfree(identify);
6277
6278 return rc;
6279}
6280
Kevin Barnettb212c252018-12-07 16:28:10 -06006281struct pqi_config_table_section_info {
6282 struct pqi_ctrl_info *ctrl_info;
6283 void *section;
6284 u32 section_offset;
6285 void __iomem *section_iomem_addr;
6286};
6287
6288static inline bool pqi_is_firmware_feature_supported(
6289 struct pqi_config_table_firmware_features *firmware_features,
6290 unsigned int bit_position)
6291{
6292 unsigned int byte_index;
6293
6294 byte_index = bit_position / BITS_PER_BYTE;
6295
6296 if (byte_index >= le16_to_cpu(firmware_features->num_elements))
6297 return false;
6298
6299 return firmware_features->features_supported[byte_index] &
6300 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
6301}
6302
6303static inline bool pqi_is_firmware_feature_enabled(
6304 struct pqi_config_table_firmware_features *firmware_features,
6305 void __iomem *firmware_features_iomem_addr,
6306 unsigned int bit_position)
6307{
6308 unsigned int byte_index;
6309 u8 __iomem *features_enabled_iomem_addr;
6310
6311 byte_index = (bit_position / BITS_PER_BYTE) +
6312 (le16_to_cpu(firmware_features->num_elements) * 2);
6313
6314 features_enabled_iomem_addr = firmware_features_iomem_addr +
6315 offsetof(struct pqi_config_table_firmware_features,
6316 features_supported) + byte_index;
6317
6318 return *((__force u8 *)features_enabled_iomem_addr) &
6319 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
6320}
6321
6322static inline void pqi_request_firmware_feature(
6323 struct pqi_config_table_firmware_features *firmware_features,
6324 unsigned int bit_position)
6325{
6326 unsigned int byte_index;
6327
6328 byte_index = (bit_position / BITS_PER_BYTE) +
6329 le16_to_cpu(firmware_features->num_elements);
6330
6331 firmware_features->features_supported[byte_index] |=
6332 (1 << (bit_position % BITS_PER_BYTE));
6333}
6334
6335static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
6336 u16 first_section, u16 last_section)
6337{
6338 struct pqi_vendor_general_request request;
6339
6340 memset(&request, 0, sizeof(request));
6341
6342 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
6343 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
6344 &request.header.iu_length);
6345 put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE,
6346 &request.function_code);
6347 put_unaligned_le16(first_section,
6348 &request.data.config_table_update.first_section);
6349 put_unaligned_le16(last_section,
6350 &request.data.config_table_update.last_section);
6351
6352 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
6353 0, NULL, NO_TIMEOUT);
6354}
6355
6356static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
6357 struct pqi_config_table_firmware_features *firmware_features,
6358 void __iomem *firmware_features_iomem_addr)
6359{
6360 void *features_requested;
6361 void __iomem *features_requested_iomem_addr;
6362
6363 features_requested = firmware_features->features_supported +
6364 le16_to_cpu(firmware_features->num_elements);
6365
6366 features_requested_iomem_addr = firmware_features_iomem_addr +
6367 (features_requested - (void *)firmware_features);
6368
6369 memcpy_toio(features_requested_iomem_addr, features_requested,
6370 le16_to_cpu(firmware_features->num_elements));
6371
6372 return pqi_config_table_update(ctrl_info,
6373 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
6374 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
6375}
6376
6377struct pqi_firmware_feature {
6378 char *feature_name;
6379 unsigned int feature_bit;
6380 bool supported;
6381 bool enabled;
6382 void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
6383 struct pqi_firmware_feature *firmware_feature);
6384};
6385
6386static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
6387 struct pqi_firmware_feature *firmware_feature)
6388{
6389 if (!firmware_feature->supported) {
6390 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n",
6391 firmware_feature->feature_name);
6392 return;
6393 }
6394
6395 if (firmware_feature->enabled) {
6396 dev_info(&ctrl_info->pci_dev->dev,
6397 "%s enabled\n", firmware_feature->feature_name);
6398 return;
6399 }
6400
6401 dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n",
6402 firmware_feature->feature_name);
6403}
6404
6405static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
6406 struct pqi_firmware_feature *firmware_feature)
6407{
6408 if (firmware_feature->feature_status)
6409 firmware_feature->feature_status(ctrl_info, firmware_feature);
6410}
6411
6412static DEFINE_MUTEX(pqi_firmware_features_mutex);
6413
6414static struct pqi_firmware_feature pqi_firmware_features[] = {
6415 {
6416 .feature_name = "Online Firmware Activation",
6417 .feature_bit = PQI_FIRMWARE_FEATURE_OFA,
6418 .feature_status = pqi_firmware_feature_status,
6419 },
6420 {
6421 .feature_name = "Serial Management Protocol",
6422 .feature_bit = PQI_FIRMWARE_FEATURE_SMP,
6423 .feature_status = pqi_firmware_feature_status,
6424 },
6425};
6426
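/*
 * Firmware feature negotiation: record which driver-known features the
 * firmware advertises, request all of them via a config table update, then
 * re-read the enabled bits and report the status of each feature.
 */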
6427static void pqi_process_firmware_features(
6428 struct pqi_config_table_section_info *section_info)
6429{
6430 int rc;
6431 struct pqi_ctrl_info *ctrl_info;
6432 struct pqi_config_table_firmware_features *firmware_features;
6433 void __iomem *firmware_features_iomem_addr;
6434 unsigned int i;
6435 unsigned int num_features_supported;
6436
6437 ctrl_info = section_info->ctrl_info;
6438 firmware_features = section_info->section;
6439 firmware_features_iomem_addr = section_info->section_iomem_addr;
6440
6441 for (i = 0, num_features_supported = 0;
6442 i < ARRAY_SIZE(pqi_firmware_features); i++) {
6443 if (pqi_is_firmware_feature_supported(firmware_features,
6444 pqi_firmware_features[i].feature_bit)) {
6445 pqi_firmware_features[i].supported = true;
6446 num_features_supported++;
6447 } else {
6448 pqi_firmware_feature_update(ctrl_info,
6449 &pqi_firmware_features[i]);
6450 }
6451 }
6452
6453 if (num_features_supported == 0)
6454 return;
6455
6456 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6457 if (!pqi_firmware_features[i].supported)
6458 continue;
6459 pqi_request_firmware_feature(firmware_features,
6460 pqi_firmware_features[i].feature_bit);
6461 }
6462
6463 rc = pqi_enable_firmware_features(ctrl_info, firmware_features,
6464 firmware_features_iomem_addr);
6465 if (rc) {
6466 dev_err(&ctrl_info->pci_dev->dev,
6467 "failed to enable firmware features in PQI configuration table\n");
6468 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6469 if (!pqi_firmware_features[i].supported)
6470 continue;
6471 pqi_firmware_feature_update(ctrl_info,
6472 &pqi_firmware_features[i]);
6473 }
6474 return;
6475 }
6476
6477 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6478 if (!pqi_firmware_features[i].supported)
6479 continue;
6480 if (pqi_is_firmware_feature_enabled(firmware_features,
6481 firmware_features_iomem_addr,
6482 pqi_firmware_features[i].feature_bit))
6483 pqi_firmware_features[i].enabled = true;
6484 pqi_firmware_feature_update(ctrl_info,
6485 &pqi_firmware_features[i]);
6486 }
6487}
6488
6489static void pqi_init_firmware_features(void)
6490{
6491 unsigned int i;
6492
6493 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6494 pqi_firmware_features[i].supported = false;
6495 pqi_firmware_features[i].enabled = false;
6496 }
6497}
6498
6499static void pqi_process_firmware_features_section(
6500 struct pqi_config_table_section_info *section_info)
6501{
6502 mutex_lock(&pqi_firmware_features_mutex);
6503 pqi_init_firmware_features();
6504 pqi_process_firmware_features(section_info);
6505 mutex_unlock(&pqi_firmware_features_mutex);
6506}
6507
Kevin Barnett98f87662017-05-03 18:53:11 -05006508static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
6509{
6510 u32 table_length;
6511 u32 section_offset;
6512 void __iomem *table_iomem_addr;
6513 struct pqi_config_table *config_table;
6514 struct pqi_config_table_section_header *section;
Kevin Barnettb212c252018-12-07 16:28:10 -06006515 struct pqi_config_table_section_info section_info;
Kevin Barnett98f87662017-05-03 18:53:11 -05006516
6517 table_length = ctrl_info->config_table_length;
Kevin Barnettb212c252018-12-07 16:28:10 -06006518 if (table_length == 0)
6519 return 0;
Kevin Barnett98f87662017-05-03 18:53:11 -05006520
6521 config_table = kmalloc(table_length, GFP_KERNEL);
6522 if (!config_table) {
6523 dev_err(&ctrl_info->pci_dev->dev,
Kevin Barnettd87d5472017-05-03 18:54:00 -05006524 "failed to allocate memory for PQI configuration table\n");
Kevin Barnett98f87662017-05-03 18:53:11 -05006525 return -ENOMEM;
6526 }
6527
6528 /*
6529 * Copy the config table contents from I/O memory space into the
6530 * temporary buffer.
6531 */
6532 table_iomem_addr = ctrl_info->iomem_base +
6533 ctrl_info->config_table_offset;
6534 memcpy_fromio(config_table, table_iomem_addr, table_length);
6535
Kevin Barnettb212c252018-12-07 16:28:10 -06006536 section_info.ctrl_info = ctrl_info;
Kevin Barnett98f87662017-05-03 18:53:11 -05006537 section_offset =
6538 get_unaligned_le32(&config_table->first_section_offset);
6539
6540 while (section_offset) {
6541 section = (void *)config_table + section_offset;
6542
Kevin Barnettb212c252018-12-07 16:28:10 -06006543 section_info.section = section;
6544 section_info.section_offset = section_offset;
6545 section_info.section_iomem_addr =
6546 table_iomem_addr + section_offset;
6547
Kevin Barnett98f87662017-05-03 18:53:11 -05006548 switch (get_unaligned_le16(&section->section_id)) {
Kevin Barnettb212c252018-12-07 16:28:10 -06006549 case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
6550 pqi_process_firmware_features_section(&section_info);
6551 break;
Kevin Barnett98f87662017-05-03 18:53:11 -05006552 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
Kevin Barnett5a259e32017-05-03 18:55:43 -05006553 if (pqi_disable_heartbeat)
6554 dev_warn(&ctrl_info->pci_dev->dev,
6555 "heartbeat disabled by module parameter\n");
6556 else
6557 ctrl_info->heartbeat_counter =
6558 table_iomem_addr +
6559 section_offset +
6560 offsetof(
6561 struct pqi_config_table_heartbeat,
6562 heartbeat_counter);
Kevin Barnett98f87662017-05-03 18:53:11 -05006563 break;
6564 }
6565
6566 section_offset =
6567 get_unaligned_le16(&section->next_section_offset);
6568 }
6569
6570 kfree(config_table);
6571
6572 return 0;
6573}
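/*
 * The walk above depends only on the generic section header: a 16-bit
 * section_id and a 16-bit next_section_offset, with every offset taken
 * relative to the start of the copied table and an offset of zero
 * terminating the chain. In sketch form:
 *
 *	offset = first_section_offset;
 *	while (offset) {
 *		section = (void *)config_table + offset;
 *		... dispatch on section->section_id ...
 *		offset = section->next_section_offset;
 *	}
 *
 * Only the firmware-features and heartbeat sections are consumed here; any
 * other section ID simply falls through the switch and is skipped.
 */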
6574
Kevin Barnett162d7752017-05-03 18:52:46 -05006575/* Switches the controller from PQI mode back into SIS mode. */
6576
6577static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
6578{
6579 int rc;
6580
Kevin Barnett061ef062017-05-03 18:53:05 -05006581 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
Kevin Barnett162d7752017-05-03 18:52:46 -05006582 rc = pqi_reset(ctrl_info);
6583 if (rc)
6584 return rc;
Kevin Barnett4f078e22017-08-10 13:46:57 -05006585 rc = sis_reenable_sis_mode(ctrl_info);
6586 if (rc) {
6587 dev_err(&ctrl_info->pci_dev->dev,
6588 "re-enabling SIS mode failed with error %d\n", rc);
6589 return rc;
6590 }
Kevin Barnett162d7752017-05-03 18:52:46 -05006591 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
6592
6593 return 0;
6594}
6595
6596/*
6597 * If the controller isn't already in SIS mode, this function forces it into
6598 * SIS mode.
6599 */
6600
6601static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
Kevin Barnettff6abb72016-08-31 14:54:41 -05006602{
6603 if (!sis_is_firmware_running(ctrl_info))
6604 return -ENXIO;
6605
Kevin Barnett162d7752017-05-03 18:52:46 -05006606 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
6607 return 0;
6608
6609 if (sis_is_kernel_up(ctrl_info)) {
6610 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
6611 return 0;
Kevin Barnettff6abb72016-08-31 14:54:41 -05006612 }
6613
Kevin Barnett162d7752017-05-03 18:52:46 -05006614 return pqi_revert_to_sis_mode(ctrl_info);
Kevin Barnettff6abb72016-08-31 14:54:41 -05006615}
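/*
 * Mode handling in the two helpers above, in short: the last known mode is
 * recorded with pqi_save_ctrl_mode() and queried with pqi_get_ctrl_mode(),
 * and pqi_force_sis_mode() picks one of four outcomes:
 *
 *	SIS firmware not running   -> -ENXIO, nothing else to try
 *	already in SIS mode        -> no-op
 *	SIS "kernel" still up      -> just record SIS_MODE
 *	otherwise                  -> PQI reset, then sis_reenable_sis_mode()
 */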
6616
Kevin Barnett6c223762016-06-27 16:41:00 -05006617static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
6618{
6619 int rc;
6620
Kevin Barnett162d7752017-05-03 18:52:46 -05006621 rc = pqi_force_sis_mode(ctrl_info);
6622 if (rc)
6623 return rc;
Kevin Barnett6c223762016-06-27 16:41:00 -05006624
6625 /*
6626 * Wait until the controller is ready to start accepting SIS
6627 * commands.
6628 */
6629 rc = sis_wait_for_ctrl_ready(ctrl_info);
Kevin Barnett8845fdf2017-05-03 18:53:36 -05006630 if (rc)
Kevin Barnett6c223762016-06-27 16:41:00 -05006631 return rc;
Kevin Barnett6c223762016-06-27 16:41:00 -05006632
6633 /*
6634 * Get the controller properties. This allows us to determine
 6635 * whether it supports PQI mode.
6636 */
6637 rc = sis_get_ctrl_properties(ctrl_info);
6638 if (rc) {
6639 dev_err(&ctrl_info->pci_dev->dev,
6640 "error obtaining controller properties\n");
6641 return rc;
6642 }
6643
6644 rc = sis_get_pqi_capabilities(ctrl_info);
6645 if (rc) {
6646 dev_err(&ctrl_info->pci_dev->dev,
6647 "error obtaining controller capabilities\n");
6648 return rc;
6649 }
6650
Kevin Barnettd727a772017-05-03 18:54:25 -05006651 if (reset_devices) {
6652 if (ctrl_info->max_outstanding_requests >
6653 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
6654 ctrl_info->max_outstanding_requests =
6655 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
6656 } else {
6657 if (ctrl_info->max_outstanding_requests >
6658 PQI_MAX_OUTSTANDING_REQUESTS)
6659 ctrl_info->max_outstanding_requests =
6660 PQI_MAX_OUTSTANDING_REQUESTS;
6661 }
Kevin Barnett6c223762016-06-27 16:41:00 -05006662
6663 pqi_calculate_io_resources(ctrl_info);
6664
6665 rc = pqi_alloc_error_buffer(ctrl_info);
6666 if (rc) {
6667 dev_err(&ctrl_info->pci_dev->dev,
6668 "failed to allocate PQI error buffer\n");
6669 return rc;
6670 }
6671
6672 /*
6673 * If the function we are about to call succeeds, the
6674 * controller will transition from legacy SIS mode
6675 * into PQI mode.
6676 */
6677 rc = sis_init_base_struct_addr(ctrl_info);
6678 if (rc) {
6679 dev_err(&ctrl_info->pci_dev->dev,
6680 "error initializing PQI mode\n");
6681 return rc;
6682 }
6683
6684 /* Wait for the controller to complete the SIS -> PQI transition. */
6685 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
6686 if (rc) {
6687 dev_err(&ctrl_info->pci_dev->dev,
6688 "transition to PQI mode failed\n");
6689 return rc;
6690 }
6691
6692 /* From here on, we are running in PQI mode. */
6693 ctrl_info->pqi_mode_enabled = true;
Kevin Barnettff6abb72016-08-31 14:54:41 -05006694 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
Kevin Barnett6c223762016-06-27 16:41:00 -05006695
6696 rc = pqi_alloc_admin_queues(ctrl_info);
6697 if (rc) {
6698 dev_err(&ctrl_info->pci_dev->dev,
Kevin Barnettd87d5472017-05-03 18:54:00 -05006699 "failed to allocate admin queues\n");
Kevin Barnett6c223762016-06-27 16:41:00 -05006700 return rc;
6701 }
6702
6703 rc = pqi_create_admin_queues(ctrl_info);
6704 if (rc) {
6705 dev_err(&ctrl_info->pci_dev->dev,
6706 "error creating admin queues\n");
6707 return rc;
6708 }
6709
6710 rc = pqi_report_device_capability(ctrl_info);
6711 if (rc) {
6712 dev_err(&ctrl_info->pci_dev->dev,
6713 "obtaining device capability failed\n");
6714 return rc;
6715 }
6716
6717 rc = pqi_validate_device_capability(ctrl_info);
6718 if (rc)
6719 return rc;
6720
6721 pqi_calculate_queue_resources(ctrl_info);
6722
6723 rc = pqi_enable_msix_interrupts(ctrl_info);
6724 if (rc)
6725 return rc;
6726
6727 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
6728 ctrl_info->max_msix_vectors =
6729 ctrl_info->num_msix_vectors_enabled;
6730 pqi_calculate_queue_resources(ctrl_info);
6731 }
6732
6733 rc = pqi_alloc_io_resources(ctrl_info);
6734 if (rc)
6735 return rc;
6736
6737 rc = pqi_alloc_operational_queues(ctrl_info);
Kevin Barnettd87d5472017-05-03 18:54:00 -05006738 if (rc) {
6739 dev_err(&ctrl_info->pci_dev->dev,
6740 "failed to allocate operational queues\n");
Kevin Barnett6c223762016-06-27 16:41:00 -05006741 return rc;
Kevin Barnettd87d5472017-05-03 18:54:00 -05006742 }
Kevin Barnett6c223762016-06-27 16:41:00 -05006743
6744 pqi_init_operational_queues(ctrl_info);
6745
6746 rc = pqi_request_irqs(ctrl_info);
6747 if (rc)
6748 return rc;
6749
Kevin Barnett6c223762016-06-27 16:41:00 -05006750 rc = pqi_create_queues(ctrl_info);
6751 if (rc)
6752 return rc;
6753
Kevin Barnett061ef062017-05-03 18:53:05 -05006754 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
6755
6756 ctrl_info->controller_online = true;
Kevin Barnettb212c252018-12-07 16:28:10 -06006757
6758 rc = pqi_process_config_table(ctrl_info);
6759 if (rc)
6760 return rc;
6761
Kevin Barnett061ef062017-05-03 18:53:05 -05006762 pqi_start_heartbeat_timer(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05006763
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05006764 rc = pqi_enable_events(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05006765 if (rc) {
6766 dev_err(&ctrl_info->pci_dev->dev,
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05006767 "error enabling events\n");
Kevin Barnett6c223762016-06-27 16:41:00 -05006768 return rc;
6769 }
6770
Kevin Barnett6c223762016-06-27 16:41:00 -05006771 /* Register with the SCSI subsystem. */
6772 rc = pqi_register_scsi(ctrl_info);
6773 if (rc)
6774 return rc;
6775
6776 rc = pqi_get_ctrl_firmware_version(ctrl_info);
6777 if (rc) {
6778 dev_err(&ctrl_info->pci_dev->dev,
6779 "error obtaining firmware version\n");
6780 return rc;
6781 }
6782
Dave Carroll171c2862018-12-07 16:28:35 -06006783 rc = pqi_set_diag_rescan(ctrl_info);
6784 if (rc) {
6785 dev_err(&ctrl_info->pci_dev->dev,
6786 "error enabling multi-lun rescan\n");
6787 return rc;
6788 }
6789
Kevin Barnett6c223762016-06-27 16:41:00 -05006790 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
6791 if (rc) {
6792 dev_err(&ctrl_info->pci_dev->dev,
6793 "error updating host wellness\n");
6794 return rc;
6795 }
6796
6797 pqi_schedule_update_time_worker(ctrl_info);
6798
6799 pqi_scan_scsi_devices(ctrl_info);
6800
6801 return 0;
6802}
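/*
 * Rough order of operations in pqi_ctrl_init() above: force SIS mode, size
 * the outstanding-request limit from the SIS capabilities (with a smaller
 * cap for kdump via reset_devices), allocate the error buffer, switch the
 * controller into PQI mode, create the admin queues, validate device
 * capability, size and allocate the operational queues and MSI-X vectors,
 * process the PQI config table (firmware features, heartbeat counter),
 * enable events, register with the SCSI midlayer, and finally schedule the
 * time-update worker and the initial device scan. Any failure propagates
 * back to pqi_pci_probe(), which unwinds through pqi_remove_ctrl().
 */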
6803
Kevin Barnett061ef062017-05-03 18:53:05 -05006804static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
6805{
6806 unsigned int i;
6807 struct pqi_admin_queues *admin_queues;
6808 struct pqi_event_queue *event_queue;
6809
6810 admin_queues = &ctrl_info->admin_queues;
6811 admin_queues->iq_pi_copy = 0;
6812 admin_queues->oq_ci_copy = 0;
Kevin Barnettdac12fb2018-06-18 13:23:00 -05006813 writel(0, admin_queues->oq_pi);
Kevin Barnett061ef062017-05-03 18:53:05 -05006814
6815 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6816 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
6817 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
6818 ctrl_info->queue_groups[i].oq_ci_copy = 0;
6819
Kevin Barnettdac12fb2018-06-18 13:23:00 -05006820 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]);
6821 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]);
6822 writel(0, ctrl_info->queue_groups[i].oq_pi);
Kevin Barnett061ef062017-05-03 18:53:05 -05006823 }
6824
6825 event_queue = &ctrl_info->event_queue;
Kevin Barnettdac12fb2018-06-18 13:23:00 -05006826 writel(0, event_queue->oq_pi);
Kevin Barnett061ef062017-05-03 18:53:05 -05006827 event_queue->oq_ci_copy = 0;
6828}
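/*
 * On resume the queue memory itself is reused, so only the index bookkeeping
 * needs resetting: the driver-side pi/ci copies and the matching
 * device-visible index registers are zeroed for the admin queues, for both
 * the RAID and AIO inbound paths of every queue group, and for the event
 * queue.
 */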
6829
6830static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
6831{
6832 int rc;
6833
6834 rc = pqi_force_sis_mode(ctrl_info);
6835 if (rc)
6836 return rc;
6837
6838 /*
6839 * Wait until the controller is ready to start accepting SIS
6840 * commands.
6841 */
6842 rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
6843 if (rc)
6844 return rc;
6845
6846 /*
6847 * If the function we are about to call succeeds, the
6848 * controller will transition from legacy SIS mode
6849 * into PQI mode.
6850 */
6851 rc = sis_init_base_struct_addr(ctrl_info);
6852 if (rc) {
6853 dev_err(&ctrl_info->pci_dev->dev,
6854 "error initializing PQI mode\n");
6855 return rc;
6856 }
6857
6858 /* Wait for the controller to complete the SIS -> PQI transition. */
6859 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
6860 if (rc) {
6861 dev_err(&ctrl_info->pci_dev->dev,
6862 "transition to PQI mode failed\n");
6863 return rc;
6864 }
6865
6866 /* From here on, we are running in PQI mode. */
6867 ctrl_info->pqi_mode_enabled = true;
6868 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
6869
6870 pqi_reinit_queues(ctrl_info);
6871
6872 rc = pqi_create_admin_queues(ctrl_info);
6873 if (rc) {
6874 dev_err(&ctrl_info->pci_dev->dev,
6875 "error creating admin queues\n");
6876 return rc;
6877 }
6878
6879 rc = pqi_create_queues(ctrl_info);
6880 if (rc)
6881 return rc;
6882
6883 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
6884
6885 ctrl_info->controller_online = true;
6886 pqi_start_heartbeat_timer(ctrl_info);
6887 pqi_ctrl_unblock_requests(ctrl_info);
6888
6889 rc = pqi_enable_events(ctrl_info);
6890 if (rc) {
6891 dev_err(&ctrl_info->pci_dev->dev,
Kevin Barnettd87d5472017-05-03 18:54:00 -05006892 "error enabling events\n");
Kevin Barnett061ef062017-05-03 18:53:05 -05006893 return rc;
6894 }
6895
Dave Carroll171c2862018-12-07 16:28:35 -06006896 rc = pqi_set_diag_rescan(ctrl_info);
6897 if (rc) {
6898 dev_err(&ctrl_info->pci_dev->dev,
6899 "error enabling multi-lun rescan\n");
6900 return rc;
6901 }
6902
Kevin Barnett061ef062017-05-03 18:53:05 -05006903 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
6904 if (rc) {
6905 dev_err(&ctrl_info->pci_dev->dev,
6906 "error updating host wellness\n");
6907 return rc;
6908 }
6909
6910 pqi_schedule_update_time_worker(ctrl_info);
6911
6912 pqi_scan_scsi_devices(ctrl_info);
6913
6914 return 0;
6915}
6916
Kevin Barnetta81ed5f32017-05-03 18:52:34 -05006917static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev,
6918 u16 timeout)
6919{
6920 return pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
6921 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
6922}
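/*
 * The timeout argument above is the 4-bit PCIe completion-timeout range
 * encoding held in Device Control 2 (the PCI_EXP_DEVCTL2_COMP_TIMEOUT
 * field). The only value this driver programs is 0x6, the 65 ms - 210 ms
 * range, via the PCI_EXP_COMP_TIMEOUT_65_TO_210_MS define in pqi_pci_init().
 */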
6923
Kevin Barnett6c223762016-06-27 16:41:00 -05006924static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
6925{
6926 int rc;
6927 u64 mask;
6928
6929 rc = pci_enable_device(ctrl_info->pci_dev);
6930 if (rc) {
6931 dev_err(&ctrl_info->pci_dev->dev,
6932 "failed to enable PCI device\n");
6933 return rc;
6934 }
6935
6936 if (sizeof(dma_addr_t) > 4)
6937 mask = DMA_BIT_MASK(64);
6938 else
6939 mask = DMA_BIT_MASK(32);
6940
6941 rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask);
6942 if (rc) {
6943 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
6944 goto disable_device;
6945 }
6946
6947 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
6948 if (rc) {
6949 dev_err(&ctrl_info->pci_dev->dev,
6950 "failed to obtain PCI resources\n");
6951 goto disable_device;
6952 }
6953
6954 ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
6955 ctrl_info->pci_dev, 0),
6956 sizeof(struct pqi_ctrl_registers));
6957 if (!ctrl_info->iomem_base) {
6958 dev_err(&ctrl_info->pci_dev->dev,
6959 "failed to map memory for controller registers\n");
6960 rc = -ENOMEM;
6961 goto release_regions;
6962 }
6963
Kevin Barnetta81ed5f32017-05-03 18:52:34 -05006964#define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6
6965
6966 /* Increase the PCIe completion timeout. */
6967 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
6968 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
6969 if (rc) {
6970 dev_err(&ctrl_info->pci_dev->dev,
6971 "failed to set PCIe completion timeout\n");
6972 goto release_regions;
6973 }
6974
Kevin Barnett6c223762016-06-27 16:41:00 -05006975 /* Enable bus mastering. */
6976 pci_set_master(ctrl_info->pci_dev);
6977
Kevin Barnettcbe0c7b2017-05-03 18:53:48 -05006978 ctrl_info->registers = ctrl_info->iomem_base;
6979 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
6980
Kevin Barnett6c223762016-06-27 16:41:00 -05006981 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
6982
6983 return 0;
6984
6985release_regions:
6986 pci_release_regions(ctrl_info->pci_dev);
6987disable_device:
6988 pci_disable_device(ctrl_info->pci_dev);
6989
6990 return rc;
6991}
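/*
 * Note that pqi_pci_init() maps only sizeof(struct pqi_ctrl_registers) bytes
 * of BAR 0; both the SIS registers and the PQI register block live inside
 * that window (verify_structures() at the bottom of this file pins the PQI
 * block at offset 0x4000).
 */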
6992
6993static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
6994{
6995 iounmap(ctrl_info->iomem_base);
6996 pci_release_regions(ctrl_info->pci_dev);
Kevin Barnettcbe0c7b2017-05-03 18:53:48 -05006997 if (pci_is_enabled(ctrl_info->pci_dev))
6998 pci_disable_device(ctrl_info->pci_dev);
Kevin Barnett6c223762016-06-27 16:41:00 -05006999 pci_set_drvdata(ctrl_info->pci_dev, NULL);
7000}
7001
7002static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
7003{
7004 struct pqi_ctrl_info *ctrl_info;
7005
7006 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
7007 GFP_KERNEL, numa_node);
7008 if (!ctrl_info)
7009 return NULL;
7010
7011 mutex_init(&ctrl_info->scan_mutex);
Kevin Barnett7561a7e2017-05-03 18:52:58 -05007012 mutex_init(&ctrl_info->lun_reset_mutex);
Kevin Barnett6c223762016-06-27 16:41:00 -05007013
7014 INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
7015 spin_lock_init(&ctrl_info->scsi_device_list_lock);
7016
7017 INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
7018 atomic_set(&ctrl_info->num_interrupts, 0);
7019
7020 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
7021 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
7022
Kees Cook74a0f572017-10-11 16:27:10 -07007023 timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
Kevin Barnett5f310422017-05-03 18:54:55 -05007024 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
Kevin Barnett98f87662017-05-03 18:53:11 -05007025
Kevin Barnett6c223762016-06-27 16:41:00 -05007026 sema_init(&ctrl_info->sync_request_sem,
7027 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
Kevin Barnett7561a7e2017-05-03 18:52:58 -05007028 init_waitqueue_head(&ctrl_info->block_requests_wait);
Kevin Barnett6c223762016-06-27 16:41:00 -05007029
Kevin Barnett376fb882017-05-03 18:54:43 -05007030 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
7031 spin_lock_init(&ctrl_info->raid_bypass_retry_list_lock);
7032 INIT_WORK(&ctrl_info->raid_bypass_retry_work,
7033 pqi_raid_bypass_retry_worker);
7034
Kevin Barnett6c223762016-06-27 16:41:00 -05007035 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
Kevin Barnett061ef062017-05-03 18:53:05 -05007036 ctrl_info->irq_mode = IRQ_MODE_NONE;
Kevin Barnett6c223762016-06-27 16:41:00 -05007037 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
7038
7039 return ctrl_info;
7040}
7041
7042static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
7043{
7044 kfree(ctrl_info);
7045}
7046
7047static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
7048{
Kevin Barnett98bf0612017-05-03 18:52:28 -05007049 pqi_free_irqs(ctrl_info);
7050 pqi_disable_msix_interrupts(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05007051}
7052
7053static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
7054{
7055 pqi_stop_heartbeat_timer(ctrl_info);
7056 pqi_free_interrupts(ctrl_info);
7057 if (ctrl_info->queue_memory_base)
7058 dma_free_coherent(&ctrl_info->pci_dev->dev,
7059 ctrl_info->queue_memory_length,
7060 ctrl_info->queue_memory_base,
7061 ctrl_info->queue_memory_base_dma_handle);
7062 if (ctrl_info->admin_queue_memory_base)
7063 dma_free_coherent(&ctrl_info->pci_dev->dev,
7064 ctrl_info->admin_queue_memory_length,
7065 ctrl_info->admin_queue_memory_base,
7066 ctrl_info->admin_queue_memory_base_dma_handle);
7067 pqi_free_all_io_requests(ctrl_info);
7068 if (ctrl_info->error_buffer)
7069 dma_free_coherent(&ctrl_info->pci_dev->dev,
7070 ctrl_info->error_buffer_length,
7071 ctrl_info->error_buffer,
7072 ctrl_info->error_buffer_dma_handle);
7073 if (ctrl_info->iomem_base)
7074 pqi_cleanup_pci_init(ctrl_info);
7075 pqi_free_ctrl_info(ctrl_info);
7076}
7077
7078static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
7079{
Kevin Barnett061ef062017-05-03 18:53:05 -05007080 pqi_cancel_rescan_worker(ctrl_info);
7081 pqi_cancel_update_time_worker(ctrl_info);
Kevin Barnette57a1f92016-08-31 14:54:47 -05007082 pqi_remove_all_scsi_devices(ctrl_info);
7083 pqi_unregister_scsi(ctrl_info);
Kevin Barnett162d7752017-05-03 18:52:46 -05007084 if (ctrl_info->pqi_mode_enabled)
7085 pqi_revert_to_sis_mode(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05007086 pqi_free_ctrl_resources(ctrl_info);
7087}
7088
Kevin Barnett3c509762017-05-03 18:54:37 -05007089static void pqi_perform_lockup_action(void)
7090{
7091 switch (pqi_lockup_action) {
7092 case PANIC:
7093 panic("FATAL: Smart Family Controller lockup detected");
7094 break;
7095 case REBOOT:
7096 emergency_restart();
7097 break;
7098 case NONE:
7099 default:
7100 break;
7101 }
7102}
7103
Kevin Barnett5f310422017-05-03 18:54:55 -05007104static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
7105 .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
7106 .status = SAM_STAT_CHECK_CONDITION,
7107};
7108
7109static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
Kevin Barnett376fb882017-05-03 18:54:43 -05007110{
7111 unsigned int i;
Kevin Barnett376fb882017-05-03 18:54:43 -05007112 struct pqi_io_request *io_request;
Kevin Barnett376fb882017-05-03 18:54:43 -05007113 struct scsi_cmnd *scmd;
7114
Kevin Barnett5f310422017-05-03 18:54:55 -05007115 for (i = 0; i < ctrl_info->max_io_slots; i++) {
7116 io_request = &ctrl_info->io_request_pool[i];
7117 if (atomic_read(&io_request->refcount) == 0)
7118 continue;
Kevin Barnett376fb882017-05-03 18:54:43 -05007119
Kevin Barnett5f310422017-05-03 18:54:55 -05007120 scmd = io_request->scmd;
7121 if (scmd) {
7122 set_host_byte(scmd, DID_NO_CONNECT);
7123 } else {
7124 io_request->status = -ENXIO;
7125 io_request->error_info =
7126 &pqi_ctrl_offline_raid_error_info;
Kevin Barnett376fb882017-05-03 18:54:43 -05007127 }
Kevin Barnett5f310422017-05-03 18:54:55 -05007128
7129 io_request->io_complete_callback(io_request,
7130 io_request->context);
Kevin Barnett376fb882017-05-03 18:54:43 -05007131 }
7132}
7133
Kevin Barnett5f310422017-05-03 18:54:55 -05007134static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
Kevin Barnett376fb882017-05-03 18:54:43 -05007135{
Kevin Barnett5f310422017-05-03 18:54:55 -05007136 pqi_perform_lockup_action();
7137 pqi_stop_heartbeat_timer(ctrl_info);
7138 pqi_free_interrupts(ctrl_info);
7139 pqi_cancel_rescan_worker(ctrl_info);
7140 pqi_cancel_update_time_worker(ctrl_info);
7141 pqi_ctrl_wait_until_quiesced(ctrl_info);
7142 pqi_fail_all_outstanding_requests(ctrl_info);
7143 pqi_clear_all_queued_raid_bypass_retries(ctrl_info);
7144 pqi_ctrl_unblock_requests(ctrl_info);
7145}
7146
7147static void pqi_ctrl_offline_worker(struct work_struct *work)
7148{
7149 struct pqi_ctrl_info *ctrl_info;
7150
7151 ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
7152 pqi_take_ctrl_offline_deferred(ctrl_info);
Kevin Barnett376fb882017-05-03 18:54:43 -05007153}
7154
7155static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
7156{
Kevin Barnett5f310422017-05-03 18:54:55 -05007157 if (!ctrl_info->controller_online)
7158 return;
7159
Kevin Barnett376fb882017-05-03 18:54:43 -05007160 ctrl_info->controller_online = false;
Kevin Barnett5f310422017-05-03 18:54:55 -05007161 ctrl_info->pqi_mode_enabled = false;
7162 pqi_ctrl_block_requests(ctrl_info);
Kevin Barnett5a259e32017-05-03 18:55:43 -05007163 if (!pqi_disable_ctrl_shutdown)
7164 sis_shutdown_ctrl(ctrl_info);
Kevin Barnett376fb882017-05-03 18:54:43 -05007165 pci_disable_device(ctrl_info->pci_dev);
7166 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
Kevin Barnett5f310422017-05-03 18:54:55 -05007167 schedule_work(&ctrl_info->ctrl_offline_work);
Kevin Barnett376fb882017-05-03 18:54:43 -05007168}
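/*
 * Taking a controller offline is split into two stages:
 * pqi_take_ctrl_offline() runs in whatever context detected the failure, so
 * it only flips the state flags, blocks new requests, optionally shuts the
 * controller down, and schedules ctrl_offline_work. The heavier teardown
 * (performing the lockup action, stopping the heartbeat timer, freeing
 * interrupts, and failing every outstanding request with DID_NO_CONNECT, or
 * the canned hardware-error status for driver-internal requests) happens
 * afterwards in process context in pqi_ctrl_offline_worker().
 */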
7169
Kevin Barnettd91d7822017-05-03 18:53:30 -05007170static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
Kevin Barnett6c223762016-06-27 16:41:00 -05007171 const struct pci_device_id *id)
7172{
7173 char *ctrl_description;
7174
Kevin Barnett37b36842017-05-03 18:55:01 -05007175 if (id->driver_data)
Kevin Barnett6c223762016-06-27 16:41:00 -05007176 ctrl_description = (char *)id->driver_data;
Kevin Barnett37b36842017-05-03 18:55:01 -05007177 else
7178 ctrl_description = "Microsemi Smart Family Controller";
Kevin Barnett6c223762016-06-27 16:41:00 -05007179
Kevin Barnettd91d7822017-05-03 18:53:30 -05007180 dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
Kevin Barnett6c223762016-06-27 16:41:00 -05007181}
7182
Kevin Barnettd91d7822017-05-03 18:53:30 -05007183static int pqi_pci_probe(struct pci_dev *pci_dev,
7184 const struct pci_device_id *id)
Kevin Barnett6c223762016-06-27 16:41:00 -05007185{
7186 int rc;
Sagar Biradar62dc51f2018-12-07 16:29:12 -06007187 int node, cp_node;
Kevin Barnett6c223762016-06-27 16:41:00 -05007188 struct pqi_ctrl_info *ctrl_info;
7189
Kevin Barnettd91d7822017-05-03 18:53:30 -05007190 pqi_print_ctrl_info(pci_dev, id);
Kevin Barnett6c223762016-06-27 16:41:00 -05007191
7192 if (pqi_disable_device_id_wildcards &&
7193 id->subvendor == PCI_ANY_ID &&
7194 id->subdevice == PCI_ANY_ID) {
Kevin Barnettd91d7822017-05-03 18:53:30 -05007195 dev_warn(&pci_dev->dev,
Kevin Barnett6c223762016-06-27 16:41:00 -05007196 "controller not probed because device ID wildcards are disabled\n");
7197 return -ENODEV;
7198 }
7199
7200 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
Kevin Barnettd91d7822017-05-03 18:53:30 -05007201 dev_warn(&pci_dev->dev,
Kevin Barnett6c223762016-06-27 16:41:00 -05007202 "controller device ID matched using wildcards\n");
7203
Kevin Barnettd91d7822017-05-03 18:53:30 -05007204 node = dev_to_node(&pci_dev->dev);
Sagar Biradar62dc51f2018-12-07 16:29:12 -06007205 if (node == NUMA_NO_NODE) {
7206 cp_node = cpu_to_node(0);
7207 if (cp_node == NUMA_NO_NODE)
7208 cp_node = 0;
7209 set_dev_node(&pci_dev->dev, cp_node);
7210 }
Kevin Barnett6c223762016-06-27 16:41:00 -05007211
7212 ctrl_info = pqi_alloc_ctrl_info(node);
7213 if (!ctrl_info) {
Kevin Barnettd91d7822017-05-03 18:53:30 -05007214 dev_err(&pci_dev->dev,
Kevin Barnett6c223762016-06-27 16:41:00 -05007215 "failed to allocate controller info block\n");
7216 return -ENOMEM;
7217 }
7218
Kevin Barnettd91d7822017-05-03 18:53:30 -05007219 ctrl_info->pci_dev = pci_dev;
Kevin Barnett6c223762016-06-27 16:41:00 -05007220
7221 rc = pqi_pci_init(ctrl_info);
7222 if (rc)
7223 goto error;
7224
7225 rc = pqi_ctrl_init(ctrl_info);
7226 if (rc)
7227 goto error;
7228
7229 return 0;
7230
7231error:
7232 pqi_remove_ctrl(ctrl_info);
7233
7234 return rc;
7235}
7236
Kevin Barnettd91d7822017-05-03 18:53:30 -05007237static void pqi_pci_remove(struct pci_dev *pci_dev)
Kevin Barnett6c223762016-06-27 16:41:00 -05007238{
7239 struct pqi_ctrl_info *ctrl_info;
7240
Kevin Barnettd91d7822017-05-03 18:53:30 -05007241 ctrl_info = pci_get_drvdata(pci_dev);
Kevin Barnett6c223762016-06-27 16:41:00 -05007242 if (!ctrl_info)
7243 return;
7244
Mahesh Rajashekhara1e467312018-12-07 16:29:24 -06007245 ctrl_info->in_shutdown = true;
7246
Kevin Barnett6c223762016-06-27 16:41:00 -05007247 pqi_remove_ctrl(ctrl_info);
7248}
7249
Kevin Barnettd91d7822017-05-03 18:53:30 -05007250static void pqi_shutdown(struct pci_dev *pci_dev)
Kevin Barnett6c223762016-06-27 16:41:00 -05007251{
7252 int rc;
7253 struct pqi_ctrl_info *ctrl_info;
7254
Kevin Barnettd91d7822017-05-03 18:53:30 -05007255 ctrl_info = pci_get_drvdata(pci_dev);
Kevin Barnett6c223762016-06-27 16:41:00 -05007256 if (!ctrl_info)
7257 goto error;
7258
7259 /*
7260 * Write all data in the controller's battery-backed cache to
7261 * storage.
7262 */
Kevin Barnett58322fe2017-08-10 13:46:45 -05007263 rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
Kevin Barnettb6d47812017-08-10 13:47:03 -05007264 pqi_reset(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05007265 if (rc == 0)
7266 return;
7267
7268error:
Kevin Barnettd91d7822017-05-03 18:53:30 -05007269 dev_warn(&pci_dev->dev,
Kevin Barnett6c223762016-06-27 16:41:00 -05007270 "unable to flush controller cache\n");
7271}
7272
Kevin Barnett3c509762017-05-03 18:54:37 -05007273static void pqi_process_lockup_action_param(void)
7274{
7275 unsigned int i;
7276
7277 if (!pqi_lockup_action_param)
7278 return;
7279
7280 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
7281 if (strcmp(pqi_lockup_action_param,
7282 pqi_lockup_actions[i].name) == 0) {
7283 pqi_lockup_action = pqi_lockup_actions[i].action;
7284 return;
7285 }
7286 }
7287
7288 pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
7289 DRIVER_NAME_SHORT, pqi_lockup_action_param);
7290}
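/*
 * A usage sketch, assuming pqi_lockup_action_param is wired up as a module
 * parameter named lockup_action earlier in this file: booting with
 *
 *	smartpqi.lockup_action=reboot
 *
 * makes a detected controller lockup call emergency_restart(), while "panic"
 * and "none" select panic() or no additional action. A string that matches
 * none of pqi_lockup_actions[] leaves the current setting untouched and logs
 * the warning above.
 */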
7291
7292static void pqi_process_module_params(void)
7293{
7294 pqi_process_lockup_action_param();
7295}
7296
Arnd Bergmann5c146682017-05-18 10:32:18 +02007297static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state)
Kevin Barnett061ef062017-05-03 18:53:05 -05007298{
7299 struct pqi_ctrl_info *ctrl_info;
7300
7301 ctrl_info = pci_get_drvdata(pci_dev);
7302
7303 pqi_disable_events(ctrl_info);
7304 pqi_cancel_update_time_worker(ctrl_info);
7305 pqi_cancel_rescan_worker(ctrl_info);
7306 pqi_wait_until_scan_finished(ctrl_info);
7307 pqi_wait_until_lun_reset_finished(ctrl_info);
Kevin Barnett58322fe2017-08-10 13:46:45 -05007308 pqi_flush_cache(ctrl_info, SUSPEND);
Kevin Barnett061ef062017-05-03 18:53:05 -05007309 pqi_ctrl_block_requests(ctrl_info);
7310 pqi_ctrl_wait_until_quiesced(ctrl_info);
7311 pqi_wait_until_inbound_queues_empty(ctrl_info);
7312 pqi_ctrl_wait_for_pending_io(ctrl_info);
7313 pqi_stop_heartbeat_timer(ctrl_info);
7314
7315 if (state.event == PM_EVENT_FREEZE)
7316 return 0;
7317
7318 pci_save_state(pci_dev);
7319 pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
7320
7321 ctrl_info->controller_online = false;
7322 ctrl_info->pqi_mode_enabled = false;
7323
7324 return 0;
7325}
7326
Arnd Bergmann5c146682017-05-18 10:32:18 +02007327static __maybe_unused int pqi_resume(struct pci_dev *pci_dev)
Kevin Barnett061ef062017-05-03 18:53:05 -05007328{
7329 int rc;
7330 struct pqi_ctrl_info *ctrl_info;
7331
7332 ctrl_info = pci_get_drvdata(pci_dev);
7333
7334 if (pci_dev->current_state != PCI_D0) {
7335 ctrl_info->max_hw_queue_index = 0;
7336 pqi_free_interrupts(ctrl_info);
7337 pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);
7338 rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler,
7339 IRQF_SHARED, DRIVER_NAME_SHORT,
7340 &ctrl_info->queue_groups[0]);
7341 if (rc) {
7342 dev_err(&ctrl_info->pci_dev->dev,
7343 "irq %u init failed with error %d\n",
7344 pci_dev->irq, rc);
7345 return rc;
7346 }
7347 pqi_start_heartbeat_timer(ctrl_info);
7348 pqi_ctrl_unblock_requests(ctrl_info);
7349 return 0;
7350 }
7351
7352 pci_set_power_state(pci_dev, PCI_D0);
7353 pci_restore_state(pci_dev);
7354
7355 return pqi_ctrl_init_resume(ctrl_info);
7356}
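/*
 * pqi_resume() above takes one of two paths based on pci_dev->current_state:
 * if the device is not reported as being in D0, the existing controller
 * state is kept and the driver merely drops back to a single shared legacy
 * INTx interrupt, restarts the heartbeat timer and unblocks requests; if it
 * is in D0, PCI power and saved config space are restored and the controller
 * is fully reinitialized through pqi_ctrl_init_resume().
 */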
7357
Kevin Barnett6c223762016-06-27 16:41:00 -05007358/* Define the PCI IDs for the controllers that we support. */
7359static const struct pci_device_id pqi_pci_id_table[] = {
7360 {
7361 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnettb0f94082018-03-05 09:01:00 -06007362 0x105b, 0x1211)
7363 },
7364 {
7365 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7366 0x105b, 0x1321)
7367 },
7368 {
7369 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett7eddabf2017-05-03 18:53:54 -05007370 0x152d, 0x8a22)
7371 },
7372 {
7373 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7374 0x152d, 0x8a23)
7375 },
7376 {
7377 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7378 0x152d, 0x8a24)
7379 },
7380 {
7381 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7382 0x152d, 0x8a36)
7383 },
7384 {
7385 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7386 0x152d, 0x8a37)
7387 },
7388 {
7389 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnettb0f94082018-03-05 09:01:00 -06007390 0x193d, 0x8460)
7391 },
7392 {
7393 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7394 0x193d, 0x8461)
7395 },
7396 {
7397 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Murthy Bhat84a77fe2018-12-07 16:28:53 -06007398 0x193d, 0xc460)
7399 },
7400 {
7401 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7402 0x193d, 0xc461)
7403 },
7404 {
7405 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnettb0f94082018-03-05 09:01:00 -06007406 0x193d, 0xf460)
7407 },
7408 {
7409 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7410 0x193d, 0xf461)
7411 },
7412 {
7413 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7414 0x1bd4, 0x0045)
7415 },
7416 {
7417 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7418 0x1bd4, 0x0046)
7419 },
7420 {
7421 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7422 0x1bd4, 0x0047)
7423 },
7424 {
7425 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7426 0x1bd4, 0x0048)
7427 },
7428 {
7429 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett9f8d05f2018-06-18 13:22:54 -05007430 0x1bd4, 0x004a)
7431 },
7432 {
7433 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7434 0x1bd4, 0x004b)
7435 },
7436 {
7437 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7438 0x1bd4, 0x004c)
7439 },
7440 {
7441 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Ajish Koshyc1b10472018-12-07 16:29:18 -06007442 0x19e5, 0xd227)
7443 },
7444 {
7445 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7446 0x19e5, 0xd228)
7447 },
7448 {
7449 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7450 0x19e5, 0xd229)
7451 },
7452 {
7453 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7454 0x19e5, 0xd22a)
7455 },
7456 {
7457 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7458 0x19e5, 0xd22b)
7459 },
7460 {
7461 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7462 0x19e5, 0xd22c)
7463 },
7464 {
7465 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett6c223762016-06-27 16:41:00 -05007466 PCI_VENDOR_ID_ADAPTEC2, 0x0110)
7467 },
7468 {
7469 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett55790062017-08-10 13:47:09 -05007470 PCI_VENDOR_ID_ADAPTEC2, 0x0608)
Kevin Barnett6c223762016-06-27 16:41:00 -05007471 },
7472 {
7473 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7474 PCI_VENDOR_ID_ADAPTEC2, 0x0800)
7475 },
7476 {
7477 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7478 PCI_VENDOR_ID_ADAPTEC2, 0x0801)
7479 },
7480 {
7481 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7482 PCI_VENDOR_ID_ADAPTEC2, 0x0802)
7483 },
7484 {
7485 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7486 PCI_VENDOR_ID_ADAPTEC2, 0x0803)
7487 },
7488 {
7489 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7490 PCI_VENDOR_ID_ADAPTEC2, 0x0804)
7491 },
7492 {
7493 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7494 PCI_VENDOR_ID_ADAPTEC2, 0x0805)
7495 },
7496 {
7497 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett7eddabf2017-05-03 18:53:54 -05007498 PCI_VENDOR_ID_ADAPTEC2, 0x0806)
7499 },
7500 {
7501 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett55790062017-08-10 13:47:09 -05007502 PCI_VENDOR_ID_ADAPTEC2, 0x0807)
7503 },
7504 {
7505 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett6c223762016-06-27 16:41:00 -05007506 PCI_VENDOR_ID_ADAPTEC2, 0x0900)
7507 },
7508 {
7509 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7510 PCI_VENDOR_ID_ADAPTEC2, 0x0901)
7511 },
7512 {
7513 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7514 PCI_VENDOR_ID_ADAPTEC2, 0x0902)
7515 },
7516 {
7517 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7518 PCI_VENDOR_ID_ADAPTEC2, 0x0903)
7519 },
7520 {
7521 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7522 PCI_VENDOR_ID_ADAPTEC2, 0x0904)
7523 },
7524 {
7525 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7526 PCI_VENDOR_ID_ADAPTEC2, 0x0905)
7527 },
7528 {
7529 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7530 PCI_VENDOR_ID_ADAPTEC2, 0x0906)
7531 },
7532 {
7533 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett7eddabf2017-05-03 18:53:54 -05007534 PCI_VENDOR_ID_ADAPTEC2, 0x0907)
7535 },
7536 {
7537 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7538 PCI_VENDOR_ID_ADAPTEC2, 0x0908)
7539 },
7540 {
7541 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett55790062017-08-10 13:47:09 -05007542 PCI_VENDOR_ID_ADAPTEC2, 0x090a)
7543 },
7544 {
7545 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett7eddabf2017-05-03 18:53:54 -05007546 PCI_VENDOR_ID_ADAPTEC2, 0x1200)
7547 },
7548 {
7549 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7550 PCI_VENDOR_ID_ADAPTEC2, 0x1201)
7551 },
7552 {
7553 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7554 PCI_VENDOR_ID_ADAPTEC2, 0x1202)
7555 },
7556 {
7557 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7558 PCI_VENDOR_ID_ADAPTEC2, 0x1280)
7559 },
7560 {
7561 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7562 PCI_VENDOR_ID_ADAPTEC2, 0x1281)
7563 },
7564 {
7565 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnettb0f94082018-03-05 09:01:00 -06007566 PCI_VENDOR_ID_ADAPTEC2, 0x1282)
7567 },
7568 {
7569 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett7eddabf2017-05-03 18:53:54 -05007570 PCI_VENDOR_ID_ADAPTEC2, 0x1300)
7571 },
7572 {
7573 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7574 PCI_VENDOR_ID_ADAPTEC2, 0x1301)
7575 },
7576 {
7577 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnettbd809e82017-09-27 16:29:59 -05007578 PCI_VENDOR_ID_ADAPTEC2, 0x1302)
7579 },
7580 {
7581 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7582 PCI_VENDOR_ID_ADAPTEC2, 0x1303)
7583 },
7584 {
7585 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett7eddabf2017-05-03 18:53:54 -05007586 PCI_VENDOR_ID_ADAPTEC2, 0x1380)
7587 },
7588 {
7589 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett9f8d05f2018-06-18 13:22:54 -05007590 PCI_VENDOR_ID_ADVANTECH, 0x8312)
7591 },
7592 {
7593 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett55790062017-08-10 13:47:09 -05007594 PCI_VENDOR_ID_DELL, 0x1fe0)
7595 },
7596 {
7597 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett7eddabf2017-05-03 18:53:54 -05007598 PCI_VENDOR_ID_HP, 0x0600)
7599 },
7600 {
7601 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7602 PCI_VENDOR_ID_HP, 0x0601)
7603 },
7604 {
7605 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7606 PCI_VENDOR_ID_HP, 0x0602)
7607 },
7608 {
7609 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7610 PCI_VENDOR_ID_HP, 0x0603)
7611 },
7612 {
7613 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett55790062017-08-10 13:47:09 -05007614 PCI_VENDOR_ID_HP, 0x0609)
Kevin Barnett7eddabf2017-05-03 18:53:54 -05007615 },
7616 {
7617 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7618 PCI_VENDOR_ID_HP, 0x0650)
7619 },
7620 {
7621 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7622 PCI_VENDOR_ID_HP, 0x0651)
7623 },
7624 {
7625 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7626 PCI_VENDOR_ID_HP, 0x0652)
7627 },
7628 {
7629 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7630 PCI_VENDOR_ID_HP, 0x0653)
7631 },
7632 {
7633 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7634 PCI_VENDOR_ID_HP, 0x0654)
7635 },
7636 {
7637 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7638 PCI_VENDOR_ID_HP, 0x0655)
7639 },
7640 {
7641 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett7eddabf2017-05-03 18:53:54 -05007642 PCI_VENDOR_ID_HP, 0x0700)
7643 },
7644 {
7645 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7646 PCI_VENDOR_ID_HP, 0x0701)
7647 },
7648 {
7649 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett6c223762016-06-27 16:41:00 -05007650 PCI_VENDOR_ID_HP, 0x1001)
7651 },
7652 {
7653 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7654 PCI_VENDOR_ID_HP, 0x1100)
7655 },
7656 {
7657 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7658 PCI_VENDOR_ID_HP, 0x1101)
7659 },
7660 {
7661 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett6c223762016-06-27 16:41:00 -05007662 PCI_ANY_ID, PCI_ANY_ID)
7663 },
7664 { 0 }
7665};
7666
7667MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
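/*
 * Every entry in pqi_pci_id_table[] matches the same Microsemi PQI function
 * (PCI_VENDOR_ID_ADAPTEC2 device 0x028f) and differs only by subsystem IDs,
 * so adding a new board is normally one more PCI_DEVICE_SUB() entry placed
 * before the PCI_ANY_ID catch-all, for example (the subsystem IDs below are
 * placeholders, not a real board):
 *
 *	{
 *		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 *			       0x1234, 0x5678)
 *	},
 *
 * The trailing PCI_ANY_ID entry is the wildcard that the
 * pqi_disable_device_id_wildcards check in pqi_pci_probe() guards against.
 */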
7668
7669static struct pci_driver pqi_pci_driver = {
7670 .name = DRIVER_NAME_SHORT,
7671 .id_table = pqi_pci_id_table,
7672 .probe = pqi_pci_probe,
7673 .remove = pqi_pci_remove,
7674 .shutdown = pqi_shutdown,
Kevin Barnett061ef062017-05-03 18:53:05 -05007675#if defined(CONFIG_PM)
7676 .suspend = pqi_suspend,
7677 .resume = pqi_resume,
7678#endif
Kevin Barnett6c223762016-06-27 16:41:00 -05007679};
7680
7681static int __init pqi_init(void)
7682{
7683 int rc;
7684
7685 pr_info(DRIVER_NAME "\n");
7686
7687 pqi_sas_transport_template =
7688 sas_attach_transport(&pqi_sas_transport_functions);
7689 if (!pqi_sas_transport_template)
7690 return -ENODEV;
7691
Kevin Barnett3c509762017-05-03 18:54:37 -05007692 pqi_process_module_params();
7693
Kevin Barnett6c223762016-06-27 16:41:00 -05007694 rc = pci_register_driver(&pqi_pci_driver);
7695 if (rc)
7696 sas_release_transport(pqi_sas_transport_template);
7697
7698 return rc;
7699}
7700
7701static void __exit pqi_cleanup(void)
7702{
7703 pci_unregister_driver(&pqi_pci_driver);
7704 sas_release_transport(pqi_sas_transport_template);
7705}
7706
7707module_init(pqi_init);
7708module_exit(pqi_cleanup);
7709
7710static void __attribute__((unused)) verify_structures(void)
7711{
7712 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
7713 sis_host_to_ctrl_doorbell) != 0x20);
7714 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
7715 sis_interrupt_mask) != 0x34);
7716 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
7717 sis_ctrl_to_host_doorbell) != 0x9c);
7718 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
7719 sis_ctrl_to_host_doorbell_clear) != 0xa0);
7720 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
Kevin Barnettff6abb72016-08-31 14:54:41 -05007721 sis_driver_scratch) != 0xb0);
7722 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
Kevin Barnett6c223762016-06-27 16:41:00 -05007723 sis_firmware_status) != 0xbc);
7724 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
7725 sis_mailbox) != 0x1000);
7726 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
7727 pqi_registers) != 0x4000);
7728
7729 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
7730 iu_type) != 0x0);
7731 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
7732 iu_length) != 0x2);
7733 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
7734 response_queue_id) != 0x4);
7735 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
7736 work_area) != 0x6);
7737 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
7738
7739 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
7740 status) != 0x0);
7741 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
7742 service_response) != 0x1);
7743 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
7744 data_present) != 0x2);
7745 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
7746 reserved) != 0x3);
7747 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
7748 residual_count) != 0x4);
7749 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
7750 data_length) != 0x8);
7751 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
7752 reserved1) != 0xa);
7753 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
7754 data) != 0xc);
7755 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
7756
7757 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7758 data_in_result) != 0x0);
7759 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7760 data_out_result) != 0x1);
7761 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7762 reserved) != 0x2);
7763 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7764 status) != 0x5);
7765 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7766 status_qualifier) != 0x6);
7767 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7768 sense_data_length) != 0x8);
7769 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7770 response_data_length) != 0xa);
7771 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7772 data_in_transferred) != 0xc);
7773 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7774 data_out_transferred) != 0x10);
7775 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7776 data) != 0x14);
7777 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
7778
7779 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7780 signature) != 0x0);
7781 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7782 function_and_status_code) != 0x8);
7783 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7784 max_admin_iq_elements) != 0x10);
7785 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7786 max_admin_oq_elements) != 0x11);
7787 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7788 admin_iq_element_length) != 0x12);
7789 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7790 admin_oq_element_length) != 0x13);
7791 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7792 max_reset_timeout) != 0x14);
7793 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7794 legacy_intx_status) != 0x18);
7795 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7796 legacy_intx_mask_set) != 0x1c);
7797 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7798 legacy_intx_mask_clear) != 0x20);
7799 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7800 device_status) != 0x40);
7801 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7802 admin_iq_pi_offset) != 0x48);
7803 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7804 admin_oq_ci_offset) != 0x50);
7805 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7806 admin_iq_element_array_addr) != 0x58);
7807 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7808 admin_oq_element_array_addr) != 0x60);
7809 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7810 admin_iq_ci_addr) != 0x68);
7811 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7812 admin_oq_pi_addr) != 0x70);
7813 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7814 admin_iq_num_elements) != 0x78);
7815 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7816 admin_oq_num_elements) != 0x79);
7817 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7818 admin_queue_int_msg_num) != 0x7a);
7819 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7820 device_error) != 0x80);
7821 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7822 error_details) != 0x88);
7823 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7824 device_reset) != 0x90);
7825 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7826 power_action) != 0x94);
7827 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
7828
7829 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7830 header.iu_type) != 0);
7831 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7832 header.iu_length) != 2);
7833 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7834 header.work_area) != 6);
7835 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7836 request_id) != 8);
7837 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7838 function_code) != 10);
7839 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7840 data.report_device_capability.buffer_length) != 44);
7841 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7842 data.report_device_capability.sg_descriptor) != 48);
7843 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7844 data.create_operational_iq.queue_id) != 12);
7845 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7846 data.create_operational_iq.element_array_addr) != 16);
7847 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7848 data.create_operational_iq.ci_addr) != 24);
7849 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7850 data.create_operational_iq.num_elements) != 32);
7851 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7852 data.create_operational_iq.element_length) != 34);
7853 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7854 data.create_operational_iq.queue_protocol) != 36);
7855 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7856 data.create_operational_oq.queue_id) != 12);
7857 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7858 data.create_operational_oq.element_array_addr) != 16);
7859 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7860 data.create_operational_oq.pi_addr) != 24);
7861 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7862 data.create_operational_oq.num_elements) != 32);
7863 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7864 data.create_operational_oq.element_length) != 34);
7865 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7866 data.create_operational_oq.queue_protocol) != 36);
7867 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7868 data.create_operational_oq.int_msg_num) != 40);
7869 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7870 data.create_operational_oq.coalescing_count) != 42);
7871 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7872 data.create_operational_oq.min_coalescing_time) != 44);
7873 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7874 data.create_operational_oq.max_coalescing_time) != 48);
7875 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7876 data.delete_operational_queue.queue_id) != 12);
7877 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
7878 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
7879 data.create_operational_iq) != 64 - 11);
7880 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
7881 data.create_operational_oq) != 64 - 11);
7882 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
7883 data.delete_operational_queue) != 64 - 11);
7884
7885 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7886 header.iu_type) != 0);
7887 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7888 header.iu_length) != 2);
7889 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7890 header.work_area) != 6);
7891 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7892 request_id) != 8);
7893 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7894 function_code) != 10);
7895 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7896 status) != 11);
7897 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7898 data.create_operational_iq.status_descriptor) != 12);
7899 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7900 data.create_operational_iq.iq_pi_offset) != 16);
7901 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7902 data.create_operational_oq.status_descriptor) != 12);
7903 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7904 data.create_operational_oq.oq_ci_offset) != 16);
7905 BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
7906
7907 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7908 header.iu_type) != 0);
7909 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7910 header.iu_length) != 2);
7911 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7912 header.response_queue_id) != 4);
7913 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7914 header.work_area) != 6);
7915 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7916 request_id) != 8);
7917 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7918 nexus_id) != 10);
7919 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7920 buffer_length) != 12);
7921 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7922 lun_number) != 16);
7923 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7924 protocol_specific) != 24);
7925 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7926 error_index) != 27);
7927 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7928 cdb) != 32);
7929 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7930 sg_descriptors) != 64);
7931 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
7932 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
7933
7934 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7935 header.iu_type) != 0);
7936 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7937 header.iu_length) != 2);
7938 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7939 header.response_queue_id) != 4);
7940 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7941 header.work_area) != 6);
7942 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7943 request_id) != 8);
7944 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7945 nexus_id) != 12);
7946 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7947 buffer_length) != 16);
7948 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7949 data_encryption_key_index) != 22);
7950 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7951 encrypt_tweak_lower) != 24);
7952 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7953 encrypt_tweak_upper) != 28);
7954 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7955 cdb) != 32);
7956 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7957 error_index) != 48);
7958 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7959 num_sg_descriptors) != 50);
7960 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7961 cdb_length) != 51);
7962 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7963 lun_number) != 52);
7964 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7965 sg_descriptors) != 64);
7966 BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
7967 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
7968
7969 BUILD_BUG_ON(offsetof(struct pqi_io_response,
7970 header.iu_type) != 0);
7971 BUILD_BUG_ON(offsetof(struct pqi_io_response,
7972 header.iu_length) != 2);
7973 BUILD_BUG_ON(offsetof(struct pqi_io_response,
7974 request_id) != 8);
7975 BUILD_BUG_ON(offsetof(struct pqi_io_response,
7976 error_index) != 10);
7977
7978 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7979 header.iu_type) != 0);
7980 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7981 header.iu_length) != 2);
7982 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7983 header.response_queue_id) != 4);
7984 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7985 request_id) != 8);
7986 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7987 data.report_event_configuration.buffer_length) != 12);
7988 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7989 data.report_event_configuration.sg_descriptors) != 16);
7990 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7991 data.set_event_configuration.global_event_oq_id) != 10);
7992 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7993 data.set_event_configuration.buffer_length) != 12);
7994 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7995 data.set_event_configuration.sg_descriptors) != 16);
7996
7997 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
7998 max_inbound_iu_length) != 6);
7999 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
8000 max_outbound_iu_length) != 14);
8001 BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
8002
8003 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8004 data_length) != 0);
8005 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8006 iq_arbitration_priority_support_bitmask) != 8);
8007 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8008 maximum_aw_a) != 9);
8009 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8010 maximum_aw_b) != 10);
8011 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8012 maximum_aw_c) != 11);
8013 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8014 max_inbound_queues) != 16);
8015 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8016 max_elements_per_iq) != 18);
8017 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8018 max_iq_element_length) != 24);
8019 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8020 min_iq_element_length) != 26);
8021 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8022 max_outbound_queues) != 30);
8023 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8024 max_elements_per_oq) != 32);
8025 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8026 intr_coalescing_time_granularity) != 34);
8027 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8028 max_oq_element_length) != 36);
8029 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8030 min_oq_element_length) != 38);
8031 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8032 iu_layer_descriptors) != 64);
8033 BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
8034
8035 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
8036 event_type) != 0);
8037 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
8038 oq_id) != 2);
8039 BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);
8040
8041 BUILD_BUG_ON(offsetof(struct pqi_event_config,
8042 num_event_descriptors) != 2);
8043 BUILD_BUG_ON(offsetof(struct pqi_event_config,
8044 descriptors) != 4);
8045
Kevin Barnett061ef062017-05-03 18:53:05 -05008046 BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
8047 ARRAY_SIZE(pqi_supported_event_types));
8048
Kevin Barnett6c223762016-06-27 16:41:00 -05008049 BUILD_BUG_ON(offsetof(struct pqi_event_response,
8050 header.iu_type) != 0);
8051 BUILD_BUG_ON(offsetof(struct pqi_event_response,
8052 header.iu_length) != 2);
8053 BUILD_BUG_ON(offsetof(struct pqi_event_response,
8054 event_type) != 8);
8055 BUILD_BUG_ON(offsetof(struct pqi_event_response,
8056 event_id) != 10);
8057 BUILD_BUG_ON(offsetof(struct pqi_event_response,
8058 additional_event_id) != 12);
8059 BUILD_BUG_ON(offsetof(struct pqi_event_response,
8060 data) != 16);
8061 BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
8062
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		additional_event_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);

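	/* PQI task management request IU: expected field offsets and size */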
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		outbound_queue_id_to_manage) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id_to_manage) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		task_management_function) != 30);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);

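	/* PQI task management response IU: expected field offsets and size */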
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		additional_response_info) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		response_code) != 15);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);

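	/* BMIC identify controller structure: expected field offsets */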
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configured_logical_drive_count) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configuration_signature) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_version) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		extended_logical_unit_count) != 154);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_build_number) != 190);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		controller_mode) != 292);

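	/* BMIC identify physical device structure: expected field offsets and size */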
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		phys_bay_in_box) != 115);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		device_type) != 120);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		redundant_path_present_map) != 1736);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		active_path_number) != 1738);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_connector) != 1739);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_box_on_port) != 1755);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		current_queue_depth_limit) != 1796);
	BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);

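	/* admin and operational queue elements: count, length, and alignment limits */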
	BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);

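	/* reserved I/O slots must be fewer than the maximum outstanding requests */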
	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
		PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);
}