/*
 *    driver for Microsemi PQI-based storage controllers
 *    Copyright (c) 2016-2017 Microsemi Corporation
 *    Copyright (c) 2016 PMC-Sierra, Inc.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <asm/unaligned.h>
#include "smartpqi.h"
#include "smartpqi_sis.h"

#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif

#define DRIVER_VERSION		"1.1.4-130"
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		1
#define DRIVER_RELEASE		4
#define DRIVER_REVISION		130

#define DRIVER_NAME		"Microsemi PQI Driver (v" \
				DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT	"smartpqi"

#define PQI_EXTRA_SGL_MEMORY	(12 * sizeof(struct pqi_sg_descriptor))

MODULE_AUTHOR("Microsemi");
MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
	DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");

static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, unsigned long timeout_secs);

/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);

enum pqi_lockup_action {
	NONE,
	REBOOT,
	PANIC
};

static enum pqi_lockup_action pqi_lockup_action = NONE;

static struct {
	enum pqi_lockup_action	action;
	char			*name;
} pqi_lockup_actions[] = {
	{
		.action = NONE,
		.name = "none",
	},
	{
		.action = REBOOT,
		.name = "reboot",
	},
	{
		.action = PANIC,
		.name = "panic",
	},
};

static unsigned int pqi_supported_event_types[] = {
	PQI_EVENT_TYPE_HOTPLUG,
	PQI_EVENT_TYPE_HARDWARE,
	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
	PQI_EVENT_TYPE_LOGICAL_DEVICE,
	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
};

static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
	pqi_disable_device_id_wildcards, int, 0644);
MODULE_PARM_DESC(disable_device_id_wildcards,
	"Disable device ID wildcards.");

static int pqi_disable_heartbeat;
module_param_named(disable_heartbeat,
	pqi_disable_heartbeat, int, 0644);
MODULE_PARM_DESC(disable_heartbeat,
	"Disable heartbeat.");

static int pqi_disable_ctrl_shutdown;
module_param_named(disable_ctrl_shutdown,
	pqi_disable_ctrl_shutdown, int, 0644);
MODULE_PARM_DESC(disable_ctrl_shutdown,
	"Disable controller shutdown when controller locked up.");

static char *pqi_lockup_action_param;
module_param_named(lockup_action,
	pqi_lockup_action_param, charp, 0644);
MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
	"\t\tSupported: none, reboot, panic\n"
	"\t\tDefault: none");

static char *raid_levels[] = {
	"RAID-0",
	"RAID-4",
	"RAID-1(1+0)",
	"RAID-5",
	"RAID-5+1",
	"RAID-ADG",
	"RAID-1(ADM)",
};

static char *pqi_raid_level_to_string(u8 raid_level)
{
	if (raid_level < ARRAY_SIZE(raid_levels))
		return raid_levels[raid_level];

	return "RAID UNKNOWN";
}

#define SA_RAID_0		0
#define SA_RAID_4		1
#define SA_RAID_1		2	/* also used for RAID 10 */
#define SA_RAID_5		3	/* also used for RAID 50 */
#define SA_RAID_51		4
#define SA_RAID_6		5	/* also used for RAID 60 */
#define SA_RAID_ADM		6	/* also used for RAID 1+0 ADM */
#define SA_RAID_MAX		SA_RAID_ADM
#define SA_RAID_UNKNOWN		0xff

static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
	pqi_prep_for_scsi_done(scmd);
	scmd->scsi_done(scmd);
}

static inline void pqi_disable_write_same(struct scsi_device *sdev)
{
	sdev->no_write_same = 1;
}

static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}

static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device;
}

static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
{
	return scsi3addr[2] != 0;
}

static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->controller_online)
		if (!sis_is_firmware_running(ctrl_info))
			pqi_take_ctrl_offline(ctrl_info);
}

static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}

static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
	struct pqi_ctrl_info *ctrl_info)
{
	return sis_read_driver_scratch(ctrl_info);
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_mode mode)
{
	sis_write_driver_scratch(ctrl_info, mode);
}

static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = true;
	scsi_block_requests(ctrl_info->scsi_host);
}

static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = false;
	wake_up_all(&ctrl_info->block_requests_wait);
	pqi_retry_raid_bypass_requests(ctrl_info);
	scsi_unblock_requests(ctrl_info->scsi_host);
}

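/*
 * If the controller currently has requests blocked, wait (optionally with a
 * timeout) until it is unblocked and return how many milliseconds of the
 * caller's timeout remain.
 */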
static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
	unsigned long timeout_msecs)
{
	unsigned long remaining_msecs;

	if (!pqi_ctrl_blocked(ctrl_info))
		return timeout_msecs;

	atomic_inc(&ctrl_info->num_blocked_threads);

	if (timeout_msecs == NO_TIMEOUT) {
		wait_event(ctrl_info->block_requests_wait,
			!pqi_ctrl_blocked(ctrl_info));
		remaining_msecs = timeout_msecs;
	} else {
		unsigned long remaining_jiffies;

		remaining_jiffies =
			wait_event_timeout(ctrl_info->block_requests_wait,
				!pqi_ctrl_blocked(ctrl_info),
				msecs_to_jiffies(timeout_msecs));
		remaining_msecs = jiffies_to_msecs(remaining_jiffies);
	}

	atomic_dec(&ctrl_info->num_blocked_threads);

	return remaining_msecs;
}

static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
	while (atomic_read(&ctrl_info->num_busy_threads) >
		atomic_read(&ctrl_info->num_blocked_threads))
		usleep_range(1000, 2000);
}

static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
{
	return device->device_offline;
}

static inline void pqi_device_reset_start(struct pqi_scsi_dev *device)
{
	device->in_reset = true;
}

static inline void pqi_device_reset_done(struct pqi_scsi_dev *device)
{
	device->in_reset = false;
}

static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
{
	return device->in_reset;
}

static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
{
	device->in_remove = true;
}

static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	return device->in_remove && !ctrl_info->in_shutdown;
}

static inline void pqi_schedule_rescan_worker_with_delay(
	struct pqi_ctrl_info *ctrl_info, unsigned long delay)
{
	if (pqi_ctrl_offline(ctrl_info))
		return;

	schedule_delayed_work(&ctrl_info->rescan_work, delay);
}

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
}

#define PQI_RESCAN_WORK_DELAY	(10 * HZ)

static inline void pqi_schedule_rescan_worker_delayed(
	struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
}

static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->rescan_work);
}

static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->heartbeat_counter)
		return 0;

	return readl(ctrl_info->heartbeat_counter);
}

static int pqi_map_single(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
	size_t buffer_length, enum dma_data_direction data_direction)
{
	dma_addr_t bus_address;

	if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
		return 0;

	bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
		data_direction);
	if (dma_mapping_error(&pci_dev->dev, bus_address))
		return -ENOMEM;

	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
	put_unaligned_le32(buffer_length, &sg_descriptor->length);
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return 0;
}

static void pqi_pci_unmap(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *descriptors, int num_descriptors,
	enum dma_data_direction data_direction)
{
	int i;

	if (data_direction == DMA_NONE)
		return;

	for (i = 0; i < num_descriptors; i++)
		dma_unmap_single(&pci_dev->dev,
			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
			get_unaligned_le32(&descriptors[i].length),
			data_direction);
}

static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length,
	u16 vpd_page, enum dma_data_direction *dir)
{
	u8 *cdb;
	size_t cdb_length = buffer_length;

	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le16(offsetof(struct pqi_raid_path_request,
		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le32(buffer_length, &request->buffer_length);
	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	cdb = request->cdb;

	switch (cmd) {
	case INQUIRY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = INQUIRY;
		if (vpd_page & VPD_PAGE) {
			cdb[1] = 0x1;
			cdb[2] = (u8)vpd_page;
		}
		cdb[4] = (u8)cdb_length;
		break;
	case CISS_REPORT_LOG:
	case CISS_REPORT_PHYS:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = cmd;
		if (cmd == CISS_REPORT_PHYS)
			cdb[1] = CISS_REPORT_PHYS_EXTENDED;
		else
			cdb[1] = CISS_REPORT_LOG_EXTENDED;
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case CISS_GET_RAID_MAP:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = CISS_READ;
		cdb[1] = CISS_GET_RAID_MAP;
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case SA_FLUSH_CACHE:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = BMIC_FLUSH_CACHE;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SENSE_DIAG_OPTIONS:
		cdb_length = 0;
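		/* fall through to the BMIC_READ setup below */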
	case BMIC_IDENTIFY_CONTROLLER:
	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = BMIC_READ;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SET_DIAG_OPTIONS:
		cdb_length = 0;
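		/* fall through to the BMIC_WRITE setup below */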
	case BMIC_WRITE_HOST_WELLNESS:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_CSMI_PASSTHRU:
		request->data_direction = SOP_BIDIRECTIONAL;
		cdb[0] = BMIC_WRITE;
		cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	default:
456 dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n",
457 cmd);
		break;
	}

	switch (request->data_direction) {
	case SOP_READ_FLAG:
		*dir = DMA_FROM_DEVICE;
		break;
	case SOP_WRITE_FLAG:
		*dir = DMA_TO_DEVICE;
		break;
	case SOP_NO_DIRECTION_FLAG:
		*dir = DMA_NONE;
		break;
	default:
		*dir = DMA_BIDIRECTIONAL;
		break;
	}

	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
		buffer, buffer_length, *dir);
}

static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
{
	io_request->scmd = NULL;
	io_request->status = 0;
	io_request->error_info = NULL;
	io_request->raid_bypass = false;
}

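/*
 * Claim a free request slot from the pre-allocated pool without taking a
 * lock: scan forward from the last slot handed out and take the first entry
 * whose refcount transitions from 0 to 1.
 */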
static struct pqi_io_request *pqi_alloc_io_request(
	struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_io_request *io_request;
	u16 i = ctrl_info->next_io_request_slot;	/* benignly racy */

	while (1) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_inc_return(&io_request->refcount) == 1)
			break;
		atomic_dec(&io_request->refcount);
		i = (i + 1) % ctrl_info->max_io_slots;
	}

	/* benignly racy */
	ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;

	pqi_reinit_io_request(io_request);

	return io_request;
}

static void pqi_free_io_request(struct pqi_io_request *io_request)
{
	atomic_dec(&io_request->refcount);
}

static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
	struct pqi_raid_error_info *error_info,
	unsigned long timeout_msecs)
{
	int rc;
	enum dma_data_direction dir;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		cmd, scsi3addr, buffer,
		buffer_length, vpd_page, &dir);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, error_info, timeout_msecs);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
	return rc;
}

/* Helper functions for pqi_send_scsi_raid_request */

static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, NULL, NO_TIMEOUT);
}

static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, error_info, NO_TIMEOUT);
}


static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
	struct bmic_identify_controller *buffer)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
		buffer, sizeof(*buffer));
}

static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
		buffer, buffer_length, vpd_page, NULL, NO_TIMEOUT);
}

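/*
 * Read the device's list of supported VPD pages (page 0x00) and check
 * whether the requested VPD page appears in it.
 */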
static bool pqi_vpd_page_supported(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u16 vpd_page)
{
	int rc;
	int i;
	int pages;
	unsigned char *buf, bufsize;

	buf = kzalloc(256, GFP_KERNEL);
	if (!buf)
		return false;

	/* Get the size of the page list first */
	rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
		VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES,
		buf, SCSI_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_unsupported;

	pages = buf[3];
	if ((pages + SCSI_VPD_HEADER_SZ) <= 255)
		bufsize = pages + SCSI_VPD_HEADER_SZ;
	else
		bufsize = 255;

	/* Get the whole VPD page list */
	rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
		VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES,
		buf, bufsize);
	if (rc != 0)
		goto exit_unsupported;

	pages = buf[3];
	for (i = 1; i <= pages; i++)
		if (buf[3 + i] == vpd_page)
			goto exit_supported;

exit_unsupported:
	kfree(buf);
	return false;

exit_supported:
	kfree(buf);
	return true;
}

static int pqi_get_device_id(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u8 *device_id, int buflen)
{
	int rc;
	unsigned char *buf;

	if (!pqi_vpd_page_supported(ctrl_info, scsi3addr, SCSI_VPD_DEVICE_ID))
		return 1; /* function not supported */

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
		VPD_PAGE | SCSI_VPD_DEVICE_ID,
		buf, 64);
	if (rc == 0) {
		if (buflen > 16)
			buflen = 16;
		memcpy(device_id, &buf[SCSI_VPD_DEVICE_ID_IDX], buflen);
	}

	kfree(buf);

	return rc;
}

static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *buffer,
	size_t buffer_length)
{
	int rc;
	enum dma_data_direction dir;
	u16 bmic_device_index;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &dir);
	if (rc)
		return rc;

	bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
	request.cdb[2] = (u8)bmic_device_index;
	request.cdb[9] = (u8)(bmic_device_index >> 8);

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
	return rc;
}

static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
	enum bmic_flush_cache_shutdown_event shutdown_event)
{
	int rc;
	struct bmic_flush_cache *flush_cache;

	/*
	 * Don't bother trying to flush the cache if the controller is
	 * locked up.
	 */
	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;

	flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
	if (!flush_cache)
		return -ENOMEM;

	flush_cache->shutdown_event = shutdown_event;

	rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
		sizeof(*flush_cache));

	kfree(flush_cache);

	return rc;
}

int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
	struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
		buffer, buffer_length, error_info);
}

#define PQI_FETCH_PTRAID_DATA	(1UL << 31)

static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_diag_options *diag;

	diag = kzalloc(sizeof(*diag), GFP_KERNEL);
	if (!diag)
		return -ENOMEM;

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
		diag, sizeof(*diag));
	if (rc)
		goto out;

	diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS,
		diag, sizeof(*diag));
out:
	kfree(diag);

	return rc;
}

static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
	void *buffer, size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
		buffer, buffer_length);
}

#pragma pack(1)

struct bmic_host_wellness_driver_version {
	u8	start_tag[4];
	u8	driver_version_tag[2];
	__le16	driver_version_length;
	char	driver_version[32];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

static int pqi_write_driver_version_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_driver_version *buffer;
	size_t buffer_length;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->driver_version_tag[0] = 'D';
	buffer->driver_version_tag[1] = 'V';
	put_unaligned_le16(sizeof(buffer->driver_version),
		&buffer->driver_version_length);
	strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
		sizeof(buffer->driver_version) - 1);
	buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#pragma pack(1)

struct bmic_host_wellness_time {
	u8	start_tag[4];
	u8	time_tag[2];
	__le16	time_length;
	u8	time[8];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

static int pqi_write_current_time_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_time *buffer;
	size_t buffer_length;
	time64_t local_time;
	unsigned int year;
	struct tm tm;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->time_tag[0] = 'T';
	buffer->time_tag[1] = 'D';
	put_unaligned_le16(sizeof(buffer->time),
		&buffer->time_length);

	local_time = ktime_get_real_seconds();
	time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
	year = tm.tm_year + 1900;

	buffer->time[0] = bin2bcd(tm.tm_hour);
	buffer->time[1] = bin2bcd(tm.tm_min);
	buffer->time[2] = bin2bcd(tm.tm_sec);
	buffer->time[3] = 0;
	buffer->time[4] = bin2bcd(tm.tm_mon + 1);
	buffer->time[5] = bin2bcd(tm.tm_mday);
	buffer->time[6] = bin2bcd(year / 100);
	buffer->time[7] = bin2bcd(year % 100);

	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * HZ)

static void pqi_update_time_worker(struct work_struct *work)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		update_time_work);

	if (pqi_ctrl_offline(ctrl_info))
		return;

	rc = pqi_write_current_time_to_host_wellness(ctrl_info);
	if (rc)
		dev_warn(&ctrl_info->pci_dev->dev,
			"error updating time on controller\n");

	schedule_delayed_work(&ctrl_info->update_time_work,
		PQI_UPDATE_TIME_WORK_INTERVAL);
}

static inline void pqi_schedule_update_time_worker(
	struct pqi_ctrl_info *ctrl_info)
{
	schedule_delayed_work(&ctrl_info->update_time_work, 0);
}

static inline void pqi_cancel_update_time_worker(
	struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->update_time_work);
}

static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void *buffer, size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer,
		buffer_length);
}

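/*
 * Issue a CISS report-LUNs command, growing the buffer and retrying whenever
 * the LUN list length reported by the controller exceeds the current
 * allocation.
 */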
static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void **buffer)
{
	int rc;
	size_t lun_list_length;
	size_t lun_data_length;
	size_t new_lun_list_length;
	void *lun_data = NULL;
	struct report_lun_header *report_lun_header;

	report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
	if (!report_lun_header) {
		rc = -ENOMEM;
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
		sizeof(*report_lun_header));
	if (rc)
		goto out;

	lun_list_length = get_unaligned_be32(&report_lun_header->list_length);

again:
	lun_data_length = sizeof(struct report_lun_header) + lun_list_length;

	lun_data = kmalloc(lun_data_length, GFP_KERNEL);
	if (!lun_data) {
		rc = -ENOMEM;
		goto out;
	}

	if (lun_list_length == 0) {
		memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
	if (rc)
		goto out;

	new_lun_list_length = get_unaligned_be32(
		&((struct report_lun_header *)lun_data)->list_length);

	if (new_lun_list_length > lun_list_length) {
		lun_list_length = new_lun_list_length;
		kfree(lun_data);
		goto again;
	}

out:
	kfree(report_lun_header);

	if (rc) {
		kfree(lun_data);
		lun_data = NULL;
	}

	*buffer = lun_data;

	return rc;
}

static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
	void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
		buffer);
}

static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
	void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
}

static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
	struct report_phys_lun_extended **physdev_list,
	struct report_log_lun_extended **logdev_list)
{
	int rc;
	size_t logdev_list_length;
	size_t logdev_data_length;
	struct report_log_lun_extended *internal_logdev_list;
	struct report_log_lun_extended *logdev_data;
	struct report_lun_header report_lun_header;

	rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report physical LUNs failed\n");

	rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report logical LUNs failed\n");

	/*
	 * Tack the controller itself onto the end of the logical device list.
	 */

	logdev_data = *logdev_list;

	if (logdev_data) {
		logdev_list_length =
			get_unaligned_be32(&logdev_data->header.list_length);
	} else {
		memset(&report_lun_header, 0, sizeof(report_lun_header));
		logdev_data =
			(struct report_log_lun_extended *)&report_lun_header;
		logdev_list_length = 0;
	}

	logdev_data_length = sizeof(struct report_lun_header) +
		logdev_list_length;

	internal_logdev_list = kmalloc(logdev_data_length +
		sizeof(struct report_log_lun_extended), GFP_KERNEL);
	if (!internal_logdev_list) {
		kfree(*logdev_list);
		*logdev_list = NULL;
		return -ENOMEM;
	}

	memcpy(internal_logdev_list, logdev_data, logdev_data_length);
	memset((u8 *)internal_logdev_list + logdev_data_length, 0,
		sizeof(struct report_log_lun_extended_entry));
	put_unaligned_be32(logdev_list_length +
		sizeof(struct report_log_lun_extended_entry),
		&internal_logdev_list->header.list_length);

	kfree(*logdev_list);
	*logdev_list = internal_logdev_list;

	return 0;
}

static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
{
	u8 *scsi3addr;
	u32 lunid;
	int bus;
	int target;
	int lun;

	scsi3addr = device->scsi3addr;
	lunid = get_unaligned_le32(scsi3addr);

	if (pqi_is_hba_lunid(scsi3addr)) {
		/* The specified device is the controller. */
		pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
		device->target_lun_valid = true;
		return;
	}

	if (pqi_is_logical_device(device)) {
		if (device->is_external_raid_device) {
			bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
			target = (lunid >> 16) & 0x3fff;
			lun = lunid & 0xff;
		} else {
			bus = PQI_RAID_VOLUME_BUS;
			target = 0;
			lun = lunid & 0x3fff;
		}
		pqi_set_bus_target_lun(device, bus, target, lun);
		device->target_lun_valid = true;
		return;
	}

	/*
	 * Defer target and LUN assignment for non-controller physical devices
	 * because the SAS transport layer will make these assignments later.
	 */
	pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
}

static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 raid_level;
	u8 *buffer;

	raid_level = SA_RAID_UNKNOWN;

	buffer = kmalloc(64, GFP_KERNEL);
	if (buffer) {
		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
			VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
		if (rc == 0) {
			raid_level = buffer[8];
			if (raid_level > SA_RAID_MAX)
				raid_level = SA_RAID_UNKNOWN;
		}
		kfree(buffer);
	}

	device->raid_level = raid_level;
}

static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct raid_map *raid_map)
{
	char *err_msg;
	u32 raid_map_size;
	u32 r5or6_blocks_per_row;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size < offsetof(struct raid_map, disk_data)) {
		err_msg = "RAID map too small";
		goto bad_raid_map;
	}

	if (device->raid_level == SA_RAID_1) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
			err_msg = "invalid RAID-1 map";
			goto bad_raid_map;
		}
	} else if (device->raid_level == SA_RAID_ADM) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
			err_msg = "invalid RAID-1(ADM) map";
			goto bad_raid_map;
		}
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) &&
		get_unaligned_le16(&raid_map->layout_map_count) > 1) {
		/* RAID 50/60 */
		r5or6_blocks_per_row =
			get_unaligned_le16(&raid_map->strip_size) *
			get_unaligned_le16(&raid_map->data_disks_per_row);
		if (r5or6_blocks_per_row == 0) {
			err_msg = "invalid RAID-5 or RAID-6 map";
			goto bad_raid_map;
		}
	}

	return 0;

bad_raid_map:
	dev_warn(&ctrl_info->pci_dev->dev,
		"logical device %08x%08x %s\n",
		*((u32 *)&device->scsi3addr),
		*((u32 *)&device->scsi3addr[4]), err_msg);

	return -EINVAL;
}

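/*
 * Fetch the RAID map for a logical volume.  If the controller reports a map
 * larger than the default allocation, reallocate a buffer of the reported
 * size and fetch the map again.
 */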
static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u32 raid_map_size;
	struct raid_map *raid_map;

	raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
	if (!raid_map)
		return -ENOMEM;

	rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
		device->scsi3addr, raid_map, sizeof(*raid_map),
		0, NULL, NO_TIMEOUT);

	if (rc)
		goto error;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size > sizeof(*raid_map)) {

		kfree(raid_map);

		raid_map = kmalloc(raid_map_size, GFP_KERNEL);
		if (!raid_map)
			return -ENOMEM;

		rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
			device->scsi3addr, raid_map, raid_map_size,
			0, NULL, NO_TIMEOUT);
		if (rc)
			goto error;

		if (get_unaligned_le32(&raid_map->structure_size)
			!= raid_map_size) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"Requested %d bytes, received %d bytes",
				raid_map_size,
				get_unaligned_le32(&raid_map->structure_size));
			goto error;
		}
	}

	rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
	if (rc)
		goto error;

	device->raid_map = raid_map;

	return 0;

error:
	kfree(raid_map);

	return rc;
}

static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;
	u8 bypass_status;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
	if (rc)
		goto out;

#define RAID_BYPASS_STATUS	4
#define RAID_BYPASS_CONFIGURED	0x1
#define RAID_BYPASS_ENABLED	0x2

	bypass_status = buffer[RAID_BYPASS_STATUS];
	device->raid_bypass_configured =
		(bypass_status & RAID_BYPASS_CONFIGURED) != 0;
	if (device->raid_bypass_configured &&
		(bypass_status & RAID_BYPASS_ENABLED) &&
		pqi_get_raid_map(ctrl_info, device) == 0)
		device->raid_bypass_enabled = true;

out:
	kfree(buffer);
}

/*
 * Use vendor-specific VPD to determine online/offline status of a volume.
 */

static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	size_t page_length;
	u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
	bool volume_offline = true;
	u32 volume_flags;
	struct ciss_vpd_logical_volume_status *vpd;

	vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
	if (!vpd)
		goto no_buffer;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
	if (rc)
		goto out;

	if (vpd->page_code != CISS_VPD_LV_STATUS)
		goto out;

	page_length = offsetof(struct ciss_vpd_logical_volume_status,
		volume_status) + vpd->page_length;
	if (page_length < sizeof(*vpd))
		goto out;

	volume_status = vpd->volume_status;
	volume_flags = get_unaligned_be32(&vpd->flags);
	volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;

out:
	kfree(vpd);
no_buffer:
	device->volume_status = volume_status;
	device->volume_offline = volume_offline;
}

#define PQI_INQUIRY_PAGE0_RETRIES	3

static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;
	unsigned int retries;

	if (device->is_expander_smp_device)
		return 0;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Send an inquiry to the device to see what it is. */
	for (retries = 0;;) {
		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0,
			buffer, 64);
		if (rc == 0)
			break;
		if (pqi_is_logical_device(device) ||
			rc != PQI_CMD_STATUS_ABORTED ||
			++retries > PQI_INQUIRY_PAGE0_RETRIES)
			goto out;
	}

	scsi_sanitize_inquiry_string(&buffer[8], 8);
	scsi_sanitize_inquiry_string(&buffer[16], 16);

	device->devtype = buffer[0] & 0x1f;
	memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
	memcpy(device->model, &buffer[16], sizeof(device->model));

	if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
		if (device->is_external_raid_device) {
			device->raid_level = SA_RAID_UNKNOWN;
			device->volume_status = CISS_LV_OK;
			device->volume_offline = false;
		} else {
			pqi_get_raid_level(ctrl_info, device);
			pqi_get_raid_bypass_status(ctrl_info, device);
			pqi_get_volume_status(ctrl_info, device);
		}
	}

	if (pqi_get_device_id(ctrl_info, device->scsi3addr,
		device->unique_id, sizeof(device->unique_id)) < 0)
		dev_warn(&ctrl_info->pci_dev->dev,
			"Can't get device id for scsi %d:%d:%d:%d\n",
			ctrl_info->scsi_host->host_no,
			device->bus, device->target,
			device->lun);

out:
	kfree(buffer);

	return rc;
}

static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	memset(id_phys, 0, sizeof(*id_phys));

	rc = pqi_identify_physical_device(ctrl_info, device,
		id_phys, sizeof(*id_phys));
	if (rc) {
		device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
		return;
	}

	device->queue_depth =
		get_unaligned_le16(&id_phys->current_queue_depth_limit);
	device->device_type = id_phys->device_type;
	device->active_path_index = id_phys->active_path_number;
	device->path_map = id_phys->redundant_path_present_map;
	memcpy(&device->box,
		&id_phys->alternate_paths_phys_box_on_port,
		sizeof(device->box));
	memcpy(&device->phys_connector,
		&id_phys->alternate_paths_phys_connector,
		sizeof(device->phys_connector));
	device->bay = id_phys->phys_bay_in_box;
}

static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	char *status;
	static const char unknown_state_str[] =
		"Volume is in an unknown state (%u)";
	char unknown_state_buffer[sizeof(unknown_state_str) + 10];

	switch (device->volume_status) {
	case CISS_LV_OK:
		status = "Volume online";
		break;
	case CISS_LV_FAILED:
		status = "Volume failed";
		break;
	case CISS_LV_NOT_CONFIGURED:
		status = "Volume not configured";
		break;
	case CISS_LV_DEGRADED:
		status = "Volume degraded";
		break;
	case CISS_LV_READY_FOR_RECOVERY:
		status = "Volume ready for recovery operation";
		break;
	case CISS_LV_UNDERGOING_RECOVERY:
		status = "Volume undergoing recovery";
		break;
	case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
		status = "Wrong physical drive was replaced";
		break;
	case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
		status = "A physical drive not properly connected";
		break;
	case CISS_LV_HARDWARE_OVERHEATING:
		status = "Hardware is overheating";
		break;
	case CISS_LV_HARDWARE_HAS_OVERHEATED:
		status = "Hardware has overheated";
		break;
	case CISS_LV_UNDERGOING_EXPANSION:
		status = "Volume undergoing expansion";
		break;
	case CISS_LV_NOT_AVAILABLE:
		status = "Volume waiting for transforming volume";
		break;
	case CISS_LV_QUEUED_FOR_EXPANSION:
		status = "Volume queued for expansion";
		break;
	case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
		status = "Volume disabled due to SCSI ID conflict";
		break;
	case CISS_LV_EJECTED:
		status = "Volume has been ejected";
		break;
	case CISS_LV_UNDERGOING_ERASE:
		status = "Volume undergoing background erase";
		break;
	case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
		status = "Volume ready for predictive spare rebuild";
		break;
	case CISS_LV_UNDERGOING_RPI:
		status = "Volume undergoing rapid parity initialization";
		break;
	case CISS_LV_PENDING_RPI:
		status = "Volume queued for rapid parity initialization";
		break;
	case CISS_LV_ENCRYPTED_NO_KEY:
		status = "Encrypted volume inaccessible - key not present";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION:
		status = "Volume undergoing encryption process";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
		status = "Volume undergoing encryption re-keying process";
		break;
	case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		status = "Volume encrypted but encryption is disabled";
		break;
	case CISS_LV_PENDING_ENCRYPTION:
		status = "Volume pending migration to encrypted state";
		break;
	case CISS_LV_PENDING_ENCRYPTION_REKEYING:
		status = "Volume pending encryption rekeying";
		break;
	case CISS_LV_NOT_SUPPORTED:
		status = "Volume not supported on this controller";
		break;
	case CISS_LV_STATUS_UNAVAILABLE:
		status = "Volume status not available";
		break;
	default:
		snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
			unknown_state_str, device->volume_status);
		status = unknown_state_buffer;
		break;
	}

	dev_info(&ctrl_info->pci_dev->dev,
		"scsi %d:%d:%d:%d %s\n",
		ctrl_info->scsi_host->host_no,
		device->bus, device->target, device->lun, status);
}

static void pqi_rescan_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		rescan_work);

	pqi_scan_scsi_devices(ctrl_info);
}

static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	if (pqi_is_logical_device(device))
		rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
			device->target, device->lun);
	else
		rc = pqi_add_sas_device(ctrl_info->sas_host, device);

	return rc;
}

#define PQI_PENDING_IO_TIMEOUT_SECS	20

static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	pqi_device_remove_start(device);

	rc = pqi_device_wait_for_pending_io(ctrl_info, device,
		PQI_PENDING_IO_TIMEOUT_SECS);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"scsi %d:%d:%d:%d removing device with %d outstanding commands\n",
			ctrl_info->scsi_host->host_no, device->bus,
			device->target, device->lun,
			atomic_read(&device->scsi_cmds_outstanding));

	if (pqi_is_logical_device(device))
		scsi_remove_device(device->sdev);
	else
		pqi_remove_sas_device(device);
}

/* Assumes the SCSI device list lock is held. */

static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
	int bus, int target, int lun)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry)
		if (device->bus == bus && device->target == target &&
			device->lun == lun)
			return device;

	return NULL;
}

static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
	struct pqi_scsi_dev *dev2)
{
	if (dev1->is_physical_device != dev2->is_physical_device)
		return false;

	if (dev1->is_physical_device)
		return dev1->wwid == dev2->wwid;

	return memcmp(dev1->volume_id, dev2->volume_id,
		sizeof(dev1->volume_id)) == 0;
}

enum pqi_find_result {
	DEVICE_NOT_FOUND,
	DEVICE_CHANGED,
	DEVICE_SAME,
};

static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device_to_find,
	struct pqi_scsi_dev **matching_device)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
			device->scsi3addr)) {
			*matching_device = device;
			if (pqi_device_equal(device_to_find, device)) {
				if (device_to_find->volume_offline)
					return DEVICE_CHANGED;
				return DEVICE_SAME;
			}
			return DEVICE_CHANGED;
		}
	}

	return DEVICE_NOT_FOUND;
}

static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
{
	if (device->is_expander_smp_device)
		return "Enclosure SMP ";

	return scsi_device_type(device->devtype);
}

#define PQI_DEV_INFO_BUFFER_LENGTH	128

static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
	char *action, struct pqi_scsi_dev *device)
{
	ssize_t count;
	char buffer[PQI_DEV_INFO_BUFFER_LENGTH];

	count = snprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
		"%d:%d:", ctrl_info->scsi_host->host_no, device->bus);

	if (device->target_lun_valid)
		count += snprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"%d:%d",
			device->target,
			device->lun);
	else
		count += snprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"-:-");

	if (pqi_is_logical_device(device))
		count += snprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			" %08x%08x",
			*((u32 *)&device->scsi3addr),
			*((u32 *)&device->scsi3addr[4]));
	else
		count += snprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			" %016llx", device->sas_address);

	count += snprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
		" %s %.8s %.16s ",
		pqi_device_type(device),
		device->vendor,
		device->model);

	if (pqi_is_logical_device(device)) {
		if (device->devtype == TYPE_DISK)
			count += snprintf(buffer + count,
				PQI_DEV_INFO_BUFFER_LENGTH - count,
				"SSDSmartPathCap%c En%c %-12s",
				device->raid_bypass_configured ? '+' : '-',
				device->raid_bypass_enabled ? '+' : '-',
				pqi_raid_level_to_string(device->raid_level));
	} else {
		count += snprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"AIO%c", device->aio_enabled ? '+' : '-');
		if (device->devtype == TYPE_DISK ||
			device->devtype == TYPE_ZBC)
			count += snprintf(buffer + count,
				PQI_DEV_INFO_BUFFER_LENGTH - count,
				" qd=%-6d", device->queue_depth);
	}

	dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
}

/* Assumes the SCSI device list lock is held. */

static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
	struct pqi_scsi_dev *new_device)
{
	existing_device->devtype = new_device->devtype;
	existing_device->device_type = new_device->device_type;
	existing_device->bus = new_device->bus;
	if (new_device->target_lun_valid) {
		existing_device->target = new_device->target;
		existing_device->lun = new_device->lun;
		existing_device->target_lun_valid = true;
	}

	/* By definition, the scsi3addr and wwid fields are already the same. */

	existing_device->is_physical_device = new_device->is_physical_device;
	existing_device->is_external_raid_device =
		new_device->is_external_raid_device;
	existing_device->is_expander_smp_device =
		new_device->is_expander_smp_device;
	existing_device->aio_enabled = new_device->aio_enabled;
	memcpy(existing_device->vendor, new_device->vendor,
		sizeof(existing_device->vendor));
	memcpy(existing_device->model, new_device->model,
		sizeof(existing_device->model));
	existing_device->sas_address = new_device->sas_address;
	existing_device->raid_level = new_device->raid_level;
	existing_device->queue_depth = new_device->queue_depth;
	existing_device->aio_handle = new_device->aio_handle;
	existing_device->volume_status = new_device->volume_status;
	existing_device->active_path_index = new_device->active_path_index;
	existing_device->path_map = new_device->path_map;
	existing_device->bay = new_device->bay;
	memcpy(existing_device->box, new_device->box,
		sizeof(existing_device->box));
	memcpy(existing_device->phys_connector, new_device->phys_connector,
		sizeof(existing_device->phys_connector));
	existing_device->offload_to_mirror = 0;
	kfree(existing_device->raid_map);
	existing_device->raid_map = new_device->raid_map;
	existing_device->raid_bypass_configured =
		new_device->raid_bypass_configured;
	existing_device->raid_bypass_enabled =
		new_device->raid_bypass_enabled;
	existing_device->device_offline = false;
Kevin Barnett6c223762016-06-27 16:41:00 -05001693
1694	/* Keep the raid_map, now owned by existing_device, from being freed along with new_device later. */
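	/* Hypothetical note: new_device is freed at the end of the rescan, so its raid_map pointer must be cleared here. */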
1695 new_device->raid_map = NULL;
1696}
1697
1698static inline void pqi_free_device(struct pqi_scsi_dev *device)
1699{
1700 if (device) {
1701 kfree(device->raid_map);
1702 kfree(device);
1703 }
1704}
1705
1706/*
1707 * Called when exposing a new device to the OS fails in order to re-adjust
1708 * our internal SCSI device list to match the SCSI ML's view.
1709 */
1710
1711static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
1712 struct pqi_scsi_dev *device)
1713{
1714 unsigned long flags;
1715
1716 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1717 list_del(&device->scsi_device_list_entry);
1718 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1719
1720 /* Allow the device structure to be freed later. */
1721 device->keep_device = false;
1722}
1723
Don Brace3d46a592018-12-07 16:30:05 -06001724static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
1725{
1726 if (device->is_expander_smp_device)
1727 return device->sas_port != NULL;
1728
1729 return device->sdev != NULL;
1730}
1731
Kevin Barnett6c223762016-06-27 16:41:00 -05001732static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
1733 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
1734{
1735 int rc;
1736 unsigned int i;
1737 unsigned long flags;
1738 enum pqi_find_result find_result;
1739 struct pqi_scsi_dev *device;
1740 struct pqi_scsi_dev *next;
1741 struct pqi_scsi_dev *matching_device;
Kevin Barnett8a994a02017-05-03 18:55:37 -05001742 LIST_HEAD(add_list);
1743 LIST_HEAD(delete_list);
Kevin Barnett6c223762016-06-27 16:41:00 -05001744
1745 /*
1746 * The idea here is to do as little work as possible while holding the
1747 * spinlock. That's why we go to great pains to defer anything other
1748 * than updating the internal device list until after we release the
1749 * spinlock.
1750 */
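	/*
	 * The update runs in three passes: (1) under the lock, mark every
	 * existing device gone, then clear that flag for devices also present
	 * in the new list and flag genuinely new ones; (2) still under the
	 * lock, move departed devices to delete_list and new devices to
	 * add_list; (3) after dropping the lock, remove departed devices,
	 * propagate queue depth changes, and expose the new devices.
	 */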
1751
1752 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1753
1754 /* Assume that all devices in the existing list have gone away. */
1755 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1756 scsi_device_list_entry)
1757 device->device_gone = true;
1758
1759 for (i = 0; i < num_new_devices; i++) {
1760 device = new_device_list[i];
1761
1762 find_result = pqi_scsi_find_entry(ctrl_info, device,
1763 &matching_device);
1764
1765 switch (find_result) {
1766 case DEVICE_SAME:
1767 /*
1768 * The newly found device is already in the existing
1769 * device list.
1770 */
1771 device->new_device = false;
1772 matching_device->device_gone = false;
1773 pqi_scsi_update_device(matching_device, device);
1774 break;
1775 case DEVICE_NOT_FOUND:
1776 /*
1777 * The newly found device is NOT in the existing device
1778 * list.
1779 */
1780 device->new_device = true;
1781 break;
1782 case DEVICE_CHANGED:
1783 /*
1784 * The original device has gone away and we need to add
1785 * the new device.
1786 */
1787 device->new_device = true;
1788 break;
Kevin Barnett6c223762016-06-27 16:41:00 -05001789 }
1790 }
1791
1792 /* Process all devices that have gone away. */
1793 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
1794 scsi_device_list_entry) {
1795 if (device->device_gone) {
1796 list_del(&device->scsi_device_list_entry);
1797 list_add_tail(&device->delete_list_entry, &delete_list);
1798 }
1799 }
1800
1801 /* Process all new devices. */
1802 for (i = 0; i < num_new_devices; i++) {
1803 device = new_device_list[i];
1804 if (!device->new_device)
1805 continue;
1806 if (device->volume_offline)
1807 continue;
1808 list_add_tail(&device->scsi_device_list_entry,
1809 &ctrl_info->scsi_device_list);
1810 list_add_tail(&device->add_list_entry, &add_list);
1811 /* To prevent this device structure from being freed later. */
1812 device->keep_device = true;
1813 }
1814
Kevin Barnett6c223762016-06-27 16:41:00 -05001815 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1816
1817 /* Remove all devices that have gone away. */
1818 list_for_each_entry_safe(device, next, &delete_list,
1819 delete_list_entry) {
Kevin Barnett6c223762016-06-27 16:41:00 -05001820 if (device->volume_offline) {
1821 pqi_dev_info(ctrl_info, "offline", device);
1822 pqi_show_volume_status(ctrl_info, device);
1823 } else {
1824 pqi_dev_info(ctrl_info, "removed", device);
1825 }
Don Brace3d46a592018-12-07 16:30:05 -06001826 if (pqi_is_device_added(device))
Kevin Barnett6de783f2017-05-03 18:55:19 -05001827 pqi_remove_device(ctrl_info, device);
Kevin Barnett6c223762016-06-27 16:41:00 -05001828 list_del(&device->delete_list_entry);
1829 pqi_free_device(device);
1830 }
1831
1832 /*
1833 * Notify the SCSI ML if the queue depth of any existing device has
1834 * changed.
1835 */
1836 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1837 scsi_device_list_entry) {
1838 if (device->sdev && device->queue_depth !=
1839 device->advertised_queue_depth) {
1840 device->advertised_queue_depth = device->queue_depth;
1841 scsi_change_queue_depth(device->sdev,
1842 device->advertised_queue_depth);
1843 }
1844 }
1845
1846 /* Expose any new devices. */
1847 list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
Don Brace3d46a592018-12-07 16:30:05 -06001848 if (!pqi_is_device_added(device)) {
Kevin Barnett6de783f2017-05-03 18:55:19 -05001849 pqi_dev_info(ctrl_info, "added", device);
Kevin Barnett6c223762016-06-27 16:41:00 -05001850 rc = pqi_add_device(ctrl_info, device);
1851 if (rc) {
1852 dev_warn(&ctrl_info->pci_dev->dev,
1853 "scsi %d:%d:%d:%d addition failed, device not added\n",
1854 ctrl_info->scsi_host->host_no,
1855 device->bus, device->target,
1856 device->lun);
1857 pqi_fixup_botched_add(ctrl_info, device);
Kevin Barnett6c223762016-06-27 16:41:00 -05001858 }
1859 }
Kevin Barnett6c223762016-06-27 16:41:00 -05001860 }
1861}
1862
1863static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
1864{
Don Brace3d46a592018-12-07 16:30:05 -06001865 bool is_supported;
1866
1867 if (device->is_expander_smp_device)
1868 return true;
1869
1870 is_supported = false;
Kevin Barnett6c223762016-06-27 16:41:00 -05001871
1872 switch (device->devtype) {
1873 case TYPE_DISK:
1874 case TYPE_ZBC:
1875 case TYPE_TAPE:
1876 case TYPE_MEDIUM_CHANGER:
1877 case TYPE_ENCLOSURE:
1878 is_supported = true;
1879 break;
1880 case TYPE_RAID:
1881 /*
1882 * Only support the HBA controller itself as a RAID
1883 * controller. If it's a RAID controller other than
Kevin Barnett376fb882017-05-03 18:54:43 -05001884 * the HBA itself (an external RAID controller, for
1885 * example), we don't support it.
Kevin Barnett6c223762016-06-27 16:41:00 -05001886 */
1887 if (pqi_is_hba_lunid(device->scsi3addr))
1888 is_supported = true;
1889 break;
1890 }
1891
1892 return is_supported;
1893}
1894
Kevin Barnett94086f52017-05-03 18:54:31 -05001895static inline bool pqi_skip_device(u8 *scsi3addr)
Kevin Barnett6c223762016-06-27 16:41:00 -05001896{
Kevin Barnett94086f52017-05-03 18:54:31 -05001897 /* Ignore all masked devices. */
1898 if (MASKED_DEVICE(scsi3addr))
Kevin Barnett6c223762016-06-27 16:41:00 -05001899 return true;
Kevin Barnett6c223762016-06-27 16:41:00 -05001900
1901 return false;
1902}
1903
Don Brace3d46a592018-12-07 16:30:05 -06001904static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device)
1905{
1906 if (!device->is_physical_device)
1907 return false;
1908
1909 if (device->is_expander_smp_device)
1910 return true;
1911
1912 switch (device->devtype) {
1913 case TYPE_DISK:
1914 case TYPE_ZBC:
1915 case TYPE_ENCLOSURE:
1916 return true;
1917 }
1918
1919 return false;
1920}
1921
Dave Carrollcd128242018-12-07 16:28:47 -06001922static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
1923{
1924 return !device->is_physical_device ||
1925 !pqi_skip_device(device->scsi3addr);
1926}
1927
Kevin Barnett6c223762016-06-27 16:41:00 -05001928static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1929{
1930 int i;
1931 int rc;
Kevin Barnett8a994a02017-05-03 18:55:37 -05001932 LIST_HEAD(new_device_list_head);
Kevin Barnett6c223762016-06-27 16:41:00 -05001933 struct report_phys_lun_extended *physdev_list = NULL;
1934 struct report_log_lun_extended *logdev_list = NULL;
1935 struct report_phys_lun_extended_entry *phys_lun_ext_entry;
1936 struct report_log_lun_extended_entry *log_lun_ext_entry;
1937 struct bmic_identify_physical_device *id_phys = NULL;
1938 u32 num_physicals;
1939 u32 num_logicals;
1940 struct pqi_scsi_dev **new_device_list = NULL;
1941 struct pqi_scsi_dev *device;
1942 struct pqi_scsi_dev *next;
1943 unsigned int num_new_devices;
1944 unsigned int num_valid_devices;
1945 bool is_physical_device;
1946 u8 *scsi3addr;
1947 static char *out_of_memory_msg =
Kevin Barnett6de783f2017-05-03 18:55:19 -05001948 "failed to allocate memory, device discovery stopped";
Kevin Barnett6c223762016-06-27 16:41:00 -05001949
Kevin Barnett6c223762016-06-27 16:41:00 -05001950 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
1951 if (rc)
1952 goto out;
1953
1954 if (physdev_list)
1955 num_physicals =
1956 get_unaligned_be32(&physdev_list->header.list_length)
1957 / sizeof(physdev_list->lun_entries[0]);
1958 else
1959 num_physicals = 0;
1960
1961 if (logdev_list)
1962 num_logicals =
1963 get_unaligned_be32(&logdev_list->header.list_length)
1964 / sizeof(logdev_list->lun_entries[0]);
1965 else
1966 num_logicals = 0;
1967
1968 if (num_physicals) {
1969 /*
1970 * We need this buffer for calls to pqi_get_physical_disk_info()
1971 * below. We allocate it here instead of inside
1972 * pqi_get_physical_disk_info() because it's a fairly large
1973 * buffer.
1974 */
1975 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
1976 if (!id_phys) {
1977 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1978 out_of_memory_msg);
1979 rc = -ENOMEM;
1980 goto out;
1981 }
1982 }
1983
1984 num_new_devices = num_physicals + num_logicals;
1985
Kees Cook6da2ec52018-06-12 13:55:00 -07001986 new_device_list = kmalloc_array(num_new_devices,
1987 sizeof(*new_device_list),
1988 GFP_KERNEL);
Kevin Barnett6c223762016-06-27 16:41:00 -05001989 if (!new_device_list) {
1990 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
1991 rc = -ENOMEM;
1992 goto out;
1993 }
1994
1995 for (i = 0; i < num_new_devices; i++) {
1996 device = kzalloc(sizeof(*device), GFP_KERNEL);
1997 if (!device) {
1998 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1999 out_of_memory_msg);
2000 rc = -ENOMEM;
2001 goto out;
2002 }
2003 list_add_tail(&device->new_device_list_entry,
2004 &new_device_list_head);
2005 }
2006
2007 device = NULL;
2008 num_valid_devices = 0;
2009
2010 for (i = 0; i < num_new_devices; i++) {
2011
2012 if (i < num_physicals) {
2013 is_physical_device = true;
2014 phys_lun_ext_entry = &physdev_list->lun_entries[i];
2015 log_lun_ext_entry = NULL;
2016 scsi3addr = phys_lun_ext_entry->lunid;
2017 } else {
2018 is_physical_device = false;
2019 phys_lun_ext_entry = NULL;
2020 log_lun_ext_entry =
2021 &logdev_list->lun_entries[i - num_physicals];
2022 scsi3addr = log_lun_ext_entry->lunid;
2023 }
2024
Kevin Barnett94086f52017-05-03 18:54:31 -05002025 if (is_physical_device && pqi_skip_device(scsi3addr))
Kevin Barnett6c223762016-06-27 16:41:00 -05002026 continue;
2027
2028 if (device)
2029 device = list_next_entry(device, new_device_list_entry);
2030 else
2031 device = list_first_entry(&new_device_list_head,
2032 struct pqi_scsi_dev, new_device_list_entry);
2033
2034 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
2035 device->is_physical_device = is_physical_device;
Don Brace3d46a592018-12-07 16:30:05 -06002036 if (is_physical_device) {
2037 if (phys_lun_ext_entry->device_type ==
2038 SA_EXPANDER_SMP_DEVICE)
2039 device->is_expander_smp_device = true;
2040 } else {
Kevin Barnettbd10cf02017-05-03 18:54:12 -05002041 device->is_external_raid_device =
2042 pqi_is_external_raid_addr(scsi3addr);
Don Brace3d46a592018-12-07 16:30:05 -06002043 }
Kevin Barnett6c223762016-06-27 16:41:00 -05002044
2045 /* Gather information about the device. */
2046 rc = pqi_get_device_info(ctrl_info, device);
2047 if (rc == -ENOMEM) {
2048 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2049 out_of_memory_msg);
2050 goto out;
2051 }
2052 if (rc) {
Kevin Barnett6de783f2017-05-03 18:55:19 -05002053 if (device->is_physical_device)
2054 dev_warn(&ctrl_info->pci_dev->dev,
2055 "obtaining device info failed, skipping physical device %016llx\n",
2056 get_unaligned_be64(
2057 &phys_lun_ext_entry->wwid));
2058 else
2059 dev_warn(&ctrl_info->pci_dev->dev,
2060 "obtaining device info failed, skipping logical device %08x%08x\n",
2061 *((u32 *)&device->scsi3addr),
2062 *((u32 *)&device->scsi3addr[4]));
Kevin Barnett6c223762016-06-27 16:41:00 -05002063 rc = 0;
2064 continue;
2065 }
2066
2067 if (!pqi_is_supported_device(device))
2068 continue;
2069
2070 pqi_assign_bus_target_lun(device);
2071
Kevin Barnett6c223762016-06-27 16:41:00 -05002072 if (device->is_physical_device) {
2073 device->wwid = phys_lun_ext_entry->wwid;
2074 if ((phys_lun_ext_entry->device_flags &
2075 REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
Don Brace3d46a592018-12-07 16:30:05 -06002076 phys_lun_ext_entry->aio_handle) {
Kevin Barnett6c223762016-06-27 16:41:00 -05002077 device->aio_enabled = true;
Don Brace3d46a592018-12-07 16:30:05 -06002078 device->aio_handle =
2079 phys_lun_ext_entry->aio_handle;
2080 }
2081 if (device->devtype == TYPE_DISK ||
2082 device->devtype == TYPE_ZBC) {
2083 pqi_get_physical_disk_info(ctrl_info,
2084 device, id_phys);
2085 }
Kevin Barnett6c223762016-06-27 16:41:00 -05002086 } else {
2087 memcpy(device->volume_id, log_lun_ext_entry->volume_id,
2088 sizeof(device->volume_id));
2089 }
2090
Don Brace3d46a592018-12-07 16:30:05 -06002091 if (pqi_is_device_with_sas_address(device))
2092 device->sas_address = get_unaligned_be64(&device->wwid);
Kevin Barnett6c223762016-06-27 16:41:00 -05002093
2094 new_device_list[num_valid_devices++] = device;
2095 }
2096
2097 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
2098
2099out:
2100 list_for_each_entry_safe(device, next, &new_device_list_head,
2101 new_device_list_entry) {
2102 if (device->keep_device)
2103 continue;
2104 list_del(&device->new_device_list_entry);
2105 pqi_free_device(device);
2106 }
2107
2108 kfree(new_device_list);
2109 kfree(physdev_list);
2110 kfree(logdev_list);
2111 kfree(id_phys);
2112
2113 return rc;
2114}
2115
2116static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2117{
2118 unsigned long flags;
2119 struct pqi_scsi_dev *device;
Kevin Barnett6c223762016-06-27 16:41:00 -05002120
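	/*
	 * Detach one device at a time so that the list spinlock is always
	 * dropped before pqi_remove_device(), which may sleep, is called.
	 */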
Kevin Barnetta37ef742017-05-03 18:52:22 -05002121 while (1) {
2122 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
Kevin Barnett6c223762016-06-27 16:41:00 -05002123
Kevin Barnetta37ef742017-05-03 18:52:22 -05002124 device = list_first_entry_or_null(&ctrl_info->scsi_device_list,
2125 struct pqi_scsi_dev, scsi_device_list_entry);
2126 if (device)
2127 list_del(&device->scsi_device_list_entry);
2128
2129 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
2130 flags);
2131
2132 if (!device)
2133 break;
2134
Don Brace3d46a592018-12-07 16:30:05 -06002135 if (pqi_is_device_added(device))
Kevin Barnett6c223762016-06-27 16:41:00 -05002136 pqi_remove_device(ctrl_info, device);
Kevin Barnett6c223762016-06-27 16:41:00 -05002137 pqi_free_device(device);
2138 }
Kevin Barnett6c223762016-06-27 16:41:00 -05002139}
2140
2141static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2142{
2143 int rc;
2144
2145 if (pqi_ctrl_offline(ctrl_info))
2146 return -ENXIO;
2147
2148 mutex_lock(&ctrl_info->scan_mutex);
2149
2150 rc = pqi_update_scsi_devices(ctrl_info);
2151 if (rc)
Kevin Barnett5f310422017-05-03 18:54:55 -05002152 pqi_schedule_rescan_worker_delayed(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05002153
2154 mutex_unlock(&ctrl_info->scan_mutex);
2155
2156 return rc;
2157}
2158
2159static void pqi_scan_start(struct Scsi_Host *shost)
2160{
2161 pqi_scan_scsi_devices(shost_to_hba(shost));
2162}
2163
2164/* Returns TRUE if scan is finished. */
2165
2166static int pqi_scan_finished(struct Scsi_Host *shost,
2167 unsigned long elapsed_time)
2168{
2169 struct pqi_ctrl_info *ctrl_info;
2170
2171 ctrl_info = shost_priv(shost);
2172
2173 return !mutex_is_locked(&ctrl_info->scan_mutex);
2174}
2175
Kevin Barnett061ef062017-05-03 18:53:05 -05002176static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info)
2177{
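	/*
	 * Taking and immediately releasing the mutex simply blocks until any
	 * scan currently holding scan_mutex has completed.
	 */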
2178 mutex_lock(&ctrl_info->scan_mutex);
2179 mutex_unlock(&ctrl_info->scan_mutex);
2180}
2181
2182static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info)
2183{
2184 mutex_lock(&ctrl_info->lun_reset_mutex);
2185 mutex_unlock(&ctrl_info->lun_reset_mutex);
2186}
2187
Kevin Barnett6c223762016-06-27 16:41:00 -05002188static inline void pqi_set_encryption_info(
2189 struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
2190 u64 first_block)
2191{
2192 u32 volume_blk_size;
2193
2194 /*
2195 * Set the encryption tweak values based on logical block address.
2196 * If the block size is 512, the tweak value is equal to the LBA.
2197 * For other block sizes, tweak value is (LBA * block size) / 512.
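	/*
	 * For example, with a hypothetical 4096-byte volume block size,
	 * LBA 100 yields a tweak of (100 * 4096) / 512 = 800.
	 */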
2198 */
2199 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
2200 if (volume_blk_size != 512)
2201 first_block = (first_block * volume_blk_size) / 512;
2202
2203 encryption_info->data_encryption_key_index =
2204 get_unaligned_le16(&raid_map->data_encryption_key_index);
2205 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
2206 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
2207}
2208
2209/*
Kevin Barnett588a63fe2017-05-03 18:55:25 -05002210 * Attempt to perform RAID bypass mapping for a logical volume I/O.
Kevin Barnett6c223762016-06-27 16:41:00 -05002211 */
2212
2213#define PQI_RAID_BYPASS_INELIGIBLE 1
2214
2215static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
2216 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
2217 struct pqi_queue_group *queue_group)
2218{
2219 struct raid_map *raid_map;
2220 bool is_write = false;
2221 u32 map_index;
2222 u64 first_block;
2223 u64 last_block;
2224 u32 block_cnt;
2225 u32 blocks_per_row;
2226 u64 first_row;
2227 u64 last_row;
2228 u32 first_row_offset;
2229 u32 last_row_offset;
2230 u32 first_column;
2231 u32 last_column;
2232 u64 r0_first_row;
2233 u64 r0_last_row;
2234 u32 r5or6_blocks_per_row;
2235 u64 r5or6_first_row;
2236 u64 r5or6_last_row;
2237 u32 r5or6_first_row_offset;
2238 u32 r5or6_last_row_offset;
2239 u32 r5or6_first_column;
2240 u32 r5or6_last_column;
2241 u16 data_disks_per_row;
2242 u32 total_disks_per_row;
2243 u16 layout_map_count;
2244 u32 stripesize;
2245 u16 strip_size;
2246 u32 first_group;
2247 u32 last_group;
2248 u32 current_group;
2249 u32 map_row;
2250 u32 aio_handle;
2251 u64 disk_block;
2252 u32 disk_block_cnt;
2253 u8 cdb[16];
2254 u8 cdb_length;
2255 int offload_to_mirror;
2256 struct pqi_encryption_info *encryption_info_ptr;
2257 struct pqi_encryption_info encryption_info;
2258#if BITS_PER_LONG == 32
2259 u64 tmpdiv;
2260#endif
2261
2262 /* Check for valid opcode, get LBA and block count. */
2263 switch (scmd->cmnd[0]) {
2264 case WRITE_6:
2265 is_write = true;
2266 /* fall through */
2267 case READ_6:
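		/*
		 * READ(6)/WRITE(6): 21-bit LBA spread across CDB bytes 1-3;
		 * a transfer length of 0 means 256 blocks.
		 */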
kevin Barnette018ef52016-09-16 15:01:51 -05002268 first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
2269 (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
Kevin Barnett6c223762016-06-27 16:41:00 -05002270 block_cnt = (u32)scmd->cmnd[4];
2271 if (block_cnt == 0)
2272 block_cnt = 256;
2273 break;
2274 case WRITE_10:
2275 is_write = true;
2276 /* fall through */
2277 case READ_10:
2278 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2279 block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
2280 break;
2281 case WRITE_12:
2282 is_write = true;
2283 /* fall through */
2284 case READ_12:
2285 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2286 block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
2287 break;
2288 case WRITE_16:
2289 is_write = true;
2290 /* fall through */
2291 case READ_16:
2292 first_block = get_unaligned_be64(&scmd->cmnd[2]);
2293 block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
2294 break;
2295 default:
2296 /* Process via normal I/O path. */
2297 return PQI_RAID_BYPASS_INELIGIBLE;
2298 }
2299
2300 /* Check for write to non-RAID-0. */
2301 if (is_write && device->raid_level != SA_RAID_0)
2302 return PQI_RAID_BYPASS_INELIGIBLE;
2303
2304 if (unlikely(block_cnt == 0))
2305 return PQI_RAID_BYPASS_INELIGIBLE;
2306
2307 last_block = first_block + block_cnt - 1;
2308 raid_map = device->raid_map;
2309
2310 /* Check for invalid block or wraparound. */
2311 if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2312 last_block < first_block)
2313 return PQI_RAID_BYPASS_INELIGIBLE;
2314
2315 data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
2316 strip_size = get_unaligned_le16(&raid_map->strip_size);
2317 layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2318
2319 /* Calculate stripe information for the request. */
2320 blocks_per_row = data_disks_per_row * strip_size;
2321#if BITS_PER_LONG == 32
2322 tmpdiv = first_block;
2323 do_div(tmpdiv, blocks_per_row);
2324 first_row = tmpdiv;
2325 tmpdiv = last_block;
2326 do_div(tmpdiv, blocks_per_row);
2327 last_row = tmpdiv;
2328 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2329 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2330 tmpdiv = first_row_offset;
2331 do_div(tmpdiv, strip_size);
2332 first_column = tmpdiv;
2333 tmpdiv = last_row_offset;
2334 do_div(tmpdiv, strip_size);
2335 last_column = tmpdiv;
2336#else
2337 first_row = first_block / blocks_per_row;
2338 last_row = last_block / blocks_per_row;
2339 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2340 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2341 first_column = first_row_offset / strip_size;
2342 last_column = last_row_offset / strip_size;
2343#endif
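	/*
	 * Illustrative (hypothetical) geometry: with 3 data disks and a
	 * strip_size of 128, blocks_per_row = 384.  An 8-block request at
	 * LBA 300 gives first_row = last_row = 0 and first_column =
	 * last_column = 2, so it fits in one strip and remains eligible.
	 */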
2344
2345 /* If this isn't a single row/column then give to the controller. */
2346 if (first_row != last_row || first_column != last_column)
2347 return PQI_RAID_BYPASS_INELIGIBLE;
2348
2349 /* Proceeding with driver mapping. */
2350 total_disks_per_row = data_disks_per_row +
2351 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2352 map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2353 get_unaligned_le16(&raid_map->row_cnt);
2354 map_index = (map_row * total_disks_per_row) + first_column;
2355
2356 /* RAID 1 */
2357 if (device->raid_level == SA_RAID_1) {
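		/*
		 * The first data_disks_per_row map entries are the data
		 * disks and the next data_disks_per_row their mirrors, so
		 * toggling offload_to_mirror alternates reads between the
		 * two copies.
		 */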
2358 if (device->offload_to_mirror)
2359 map_index += data_disks_per_row;
2360 device->offload_to_mirror = !device->offload_to_mirror;
2361 } else if (device->raid_level == SA_RAID_ADM) {
2362 /* RAID ADM */
2363 /*
2364 * Handles N-way mirrors (R1-ADM) and R10 with # of drives
2365 * divisible by 3.
2366 */
2367 offload_to_mirror = device->offload_to_mirror;
2368 if (offload_to_mirror == 0) {
2369 /* use physical disk in the first mirrored group. */
2370 map_index %= data_disks_per_row;
2371 } else {
2372 do {
2373 /*
2374 * Determine mirror group that map_index
2375 * indicates.
2376 */
2377 current_group = map_index / data_disks_per_row;
2378
2379 if (offload_to_mirror != current_group) {
2380 if (current_group <
2381 layout_map_count - 1) {
2382 /*
2383 * Select raid index from
2384 * next group.
2385 */
2386 map_index += data_disks_per_row;
2387 current_group++;
2388 } else {
2389 /*
2390 * Select raid index from first
2391 * group.
2392 */
2393 map_index %= data_disks_per_row;
2394 current_group = 0;
2395 }
2396 }
2397 } while (offload_to_mirror != current_group);
2398 }
2399
2400 /* Set mirror group to use next time. */
2401 offload_to_mirror =
2402 (offload_to_mirror >= layout_map_count - 1) ?
2403 0 : offload_to_mirror + 1;
2404 WARN_ON(offload_to_mirror >= layout_map_count);
2405 device->offload_to_mirror = offload_to_mirror;
2406 /*
2407 * Avoid direct use of device->offload_to_mirror within this
2408 * function since multiple threads might simultaneously
2409 * increment it beyond the range of device->layout_map_count -1.
2410 */
2411 } else if ((device->raid_level == SA_RAID_5 ||
2412 device->raid_level == SA_RAID_6) && layout_map_count > 1) {
2413 /* RAID 50/60 */
2414 /* Verify first and last block are in same RAID group */
2415 r5or6_blocks_per_row = strip_size * data_disks_per_row;
2416 stripesize = r5or6_blocks_per_row * layout_map_count;
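		/*
		 * Here "stripesize" is the number of data blocks in one full
		 * rotation across all layout-map groups, not the per-disk
		 * strip size.
		 */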
2417#if BITS_PER_LONG == 32
2418 tmpdiv = first_block;
2419 first_group = do_div(tmpdiv, stripesize);
2420 tmpdiv = first_group;
2421 do_div(tmpdiv, r5or6_blocks_per_row);
2422 first_group = tmpdiv;
2423 tmpdiv = last_block;
2424 last_group = do_div(tmpdiv, stripesize);
2425 tmpdiv = last_group;
2426 do_div(tmpdiv, r5or6_blocks_per_row);
2427 last_group = tmpdiv;
2428#else
2429 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
2430 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
2431#endif
2432 if (first_group != last_group)
2433 return PQI_RAID_BYPASS_INELIGIBLE;
2434
2435 /* Verify request is in a single row of RAID 5/6 */
2436#if BITS_PER_LONG == 32
2437 tmpdiv = first_block;
2438 do_div(tmpdiv, stripesize);
2439 first_row = r5or6_first_row = r0_first_row = tmpdiv;
2440 tmpdiv = last_block;
2441 do_div(tmpdiv, stripesize);
2442 r5or6_last_row = r0_last_row = tmpdiv;
2443#else
2444 first_row = r5or6_first_row = r0_first_row =
2445 first_block / stripesize;
2446 r5or6_last_row = r0_last_row = last_block / stripesize;
2447#endif
2448 if (r5or6_first_row != r5or6_last_row)
2449 return PQI_RAID_BYPASS_INELIGIBLE;
2450
2451 /* Verify request is in a single column */
2452#if BITS_PER_LONG == 32
2453 tmpdiv = first_block;
2454 first_row_offset = do_div(tmpdiv, stripesize);
2455 tmpdiv = first_row_offset;
2456 first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
2457 r5or6_first_row_offset = first_row_offset;
2458 tmpdiv = last_block;
2459 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
2460 tmpdiv = r5or6_last_row_offset;
2461 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
2462 tmpdiv = r5or6_first_row_offset;
2463 do_div(tmpdiv, strip_size);
2464 first_column = r5or6_first_column = tmpdiv;
2465 tmpdiv = r5or6_last_row_offset;
2466 do_div(tmpdiv, strip_size);
2467 r5or6_last_column = tmpdiv;
2468#else
2469 first_row_offset = r5or6_first_row_offset =
2470 (u32)((first_block % stripesize) %
2471 r5or6_blocks_per_row);
2472
2473 r5or6_last_row_offset =
2474 (u32)((last_block % stripesize) %
2475 r5or6_blocks_per_row);
2476
2477 first_column = r5or6_first_row_offset / strip_size;
2478 r5or6_first_column = first_column;
2479 r5or6_last_column = r5or6_last_row_offset / strip_size;
2480#endif
2481 if (r5or6_first_column != r5or6_last_column)
2482 return PQI_RAID_BYPASS_INELIGIBLE;
2483
2484 /* Request is eligible */
2485 map_row =
2486 ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2487 get_unaligned_le16(&raid_map->row_cnt);
2488
2489 map_index = (first_group *
2490 (get_unaligned_le16(&raid_map->row_cnt) *
2491 total_disks_per_row)) +
2492 (map_row * total_disks_per_row) + first_column;
2493 }
2494
Kevin Barnett6c223762016-06-27 16:41:00 -05002495 aio_handle = raid_map->disk_data[map_index].aio_handle;
2496 disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2497 first_row * strip_size +
2498 (first_row_offset - first_column * strip_size);
2499 disk_block_cnt = block_cnt;
2500
2501 /* Handle differing logical/physical block sizes. */
2502 if (raid_map->phys_blk_shift) {
2503 disk_block <<= raid_map->phys_blk_shift;
2504 disk_block_cnt <<= raid_map->phys_blk_shift;
2505 }
2506
2507 if (unlikely(disk_block_cnt > 0xffff))
2508 return PQI_RAID_BYPASS_INELIGIBLE;
2509
2510 /* Build the new CDB for the physical disk I/O. */
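	/*
	 * READ/WRITE(10) carries only a 32-bit LBA and a 16-bit transfer
	 * length, so larger starting blocks require the 16-byte CDB.
	 */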
2511 if (disk_block > 0xffffffff) {
2512 cdb[0] = is_write ? WRITE_16 : READ_16;
2513 cdb[1] = 0;
2514 put_unaligned_be64(disk_block, &cdb[2]);
2515 put_unaligned_be32(disk_block_cnt, &cdb[10]);
2516 cdb[14] = 0;
2517 cdb[15] = 0;
2518 cdb_length = 16;
2519 } else {
2520 cdb[0] = is_write ? WRITE_10 : READ_10;
2521 cdb[1] = 0;
2522 put_unaligned_be32((u32)disk_block, &cdb[2]);
2523 cdb[6] = 0;
2524 put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
2525 cdb[9] = 0;
2526 cdb_length = 10;
2527 }
2528
2529 if (get_unaligned_le16(&raid_map->flags) &
2530 RAID_MAP_ENCRYPTION_ENABLED) {
2531 pqi_set_encryption_info(&encryption_info, raid_map,
2532 first_block);
2533 encryption_info_ptr = &encryption_info;
2534 } else {
2535 encryption_info_ptr = NULL;
2536 }
2537
2538 return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
Kevin Barnett376fb882017-05-03 18:54:43 -05002539 cdb, cdb_length, queue_group, encryption_info_ptr, true);
Kevin Barnett6c223762016-06-27 16:41:00 -05002540}
2541
2542#define PQI_STATUS_IDLE 0x0
2543
2544#define PQI_CREATE_ADMIN_QUEUE_PAIR 1
2545#define PQI_DELETE_ADMIN_QUEUE_PAIR 2
2546
2547#define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
2548#define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
2549#define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
2550#define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
2551#define PQI_DEVICE_STATE_ERROR 0x4
2552
2553#define PQI_MODE_READY_TIMEOUT_SECS 30
2554#define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
2555
2556static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
2557{
2558 struct pqi_device_registers __iomem *pqi_registers;
2559 unsigned long timeout;
2560 u64 signature;
2561 u8 status;
2562
2563 pqi_registers = ctrl_info->pqi_registers;
2564 timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
2565
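	/*
	 * PQI mode is ready once three conditions hold, each polled against
	 * the same deadline: the PQI signature is present, the function and
	 * status code reports IDLE, and the device status reaches
	 * ALL_REGISTERS_READY.
	 */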
2566 while (1) {
2567 signature = readq(&pqi_registers->signature);
2568 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
2569 sizeof(signature)) == 0)
2570 break;
2571 if (time_after(jiffies, timeout)) {
2572 dev_err(&ctrl_info->pci_dev->dev,
2573 "timed out waiting for PQI signature\n");
2574 return -ETIMEDOUT;
2575 }
2576 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2577 }
2578
2579 while (1) {
2580 status = readb(&pqi_registers->function_and_status_code);
2581 if (status == PQI_STATUS_IDLE)
2582 break;
2583 if (time_after(jiffies, timeout)) {
2584 dev_err(&ctrl_info->pci_dev->dev,
2585 "timed out waiting for PQI IDLE\n");
2586 return -ETIMEDOUT;
2587 }
2588 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2589 }
2590
2591 while (1) {
2592 if (readl(&pqi_registers->device_status) ==
2593 PQI_DEVICE_STATE_ALL_REGISTERS_READY)
2594 break;
2595 if (time_after(jiffies, timeout)) {
2596 dev_err(&ctrl_info->pci_dev->dev,
2597 "timed out waiting for PQI all registers ready\n");
2598 return -ETIMEDOUT;
2599 }
2600 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2601 }
2602
2603 return 0;
2604}
2605
2606static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
2607{
2608 struct pqi_scsi_dev *device;
2609
2610 device = io_request->scmd->device->hostdata;
Kevin Barnett588a63fe2017-05-03 18:55:25 -05002611 device->raid_bypass_enabled = false;
Kevin Barnett376fb882017-05-03 18:54:43 -05002612 device->aio_enabled = false;
Kevin Barnett6c223762016-06-27 16:41:00 -05002613}
2614
Kevin Barnettd87d5472017-05-03 18:54:00 -05002615static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
Kevin Barnett6c223762016-06-27 16:41:00 -05002616{
2617 struct pqi_ctrl_info *ctrl_info;
Kevin Barnette58081a2016-08-31 14:54:29 -05002618 struct pqi_scsi_dev *device;
Kevin Barnett6c223762016-06-27 16:41:00 -05002619
Kevin Barnett03b288cf2017-05-03 18:54:49 -05002620 device = sdev->hostdata;
2621 if (device->device_offline)
2622 return;
2623
2624 device->device_offline = true;
Kevin Barnett03b288cf2017-05-03 18:54:49 -05002625 ctrl_info = shost_to_hba(sdev->host);
2626 pqi_schedule_rescan_worker(ctrl_info);
Dave Carrolla9a68102018-12-07 16:29:37 -06002627 dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n",
Kevin Barnett03b288cf2017-05-03 18:54:49 -05002628 path, ctrl_info->scsi_host->host_no, device->bus,
2629 device->target, device->lun);
Kevin Barnett6c223762016-06-27 16:41:00 -05002630}
2631
2632static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
2633{
2634 u8 scsi_status;
2635 u8 host_byte;
2636 struct scsi_cmnd *scmd;
2637 struct pqi_raid_error_info *error_info;
2638 size_t sense_data_length;
2639 int residual_count;
2640 int xfer_count;
2641 struct scsi_sense_hdr sshdr;
2642
2643 scmd = io_request->scmd;
2644 if (!scmd)
2645 return;
2646
2647 error_info = io_request->error_info;
2648 scsi_status = error_info->status;
2649 host_byte = DID_OK;
2650
Kevin Barnettf5b63202017-05-03 18:55:07 -05002651 switch (error_info->data_out_result) {
2652 case PQI_DATA_IN_OUT_GOOD:
2653 break;
2654 case PQI_DATA_IN_OUT_UNDERFLOW:
Kevin Barnett6c223762016-06-27 16:41:00 -05002655 xfer_count =
2656 get_unaligned_le32(&error_info->data_out_transferred);
2657 residual_count = scsi_bufflen(scmd) - xfer_count;
2658 scsi_set_resid(scmd, residual_count);
2659 if (xfer_count < scmd->underflow)
2660 host_byte = DID_SOFT_ERROR;
Kevin Barnettf5b63202017-05-03 18:55:07 -05002661 break;
2662 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
2663 case PQI_DATA_IN_OUT_ABORTED:
2664 host_byte = DID_ABORT;
2665 break;
2666 case PQI_DATA_IN_OUT_TIMEOUT:
2667 host_byte = DID_TIME_OUT;
2668 break;
2669 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
2670 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
2671 case PQI_DATA_IN_OUT_BUFFER_ERROR:
2672 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
2673 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
2674 case PQI_DATA_IN_OUT_ERROR:
2675 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
2676 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
2677 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
2678 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
2679 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
2680 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
2681 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
2682 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
2683 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
2684 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
2685 default:
2686 host_byte = DID_ERROR;
2687 break;
Kevin Barnett6c223762016-06-27 16:41:00 -05002688 }
2689
2690 sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
2691 if (sense_data_length == 0)
2692 sense_data_length =
2693 get_unaligned_le16(&error_info->response_data_length);
2694 if (sense_data_length) {
2695 if (sense_data_length > sizeof(error_info->data))
2696 sense_data_length = sizeof(error_info->data);
2697
2698 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
2699 scsi_normalize_sense(error_info->data,
2700 sense_data_length, &sshdr) &&
2701 sshdr.sense_key == HARDWARE_ERROR &&
2702 sshdr.asc == 0x3e &&
2703 sshdr.ascq == 0x1) {
Kevin Barnettd87d5472017-05-03 18:54:00 -05002704 pqi_take_device_offline(scmd->device, "RAID");
Kevin Barnett6c223762016-06-27 16:41:00 -05002705 host_byte = DID_NO_CONNECT;
2706 }
2707
2708 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2709 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2710 memcpy(scmd->sense_buffer, error_info->data,
2711 sense_data_length);
2712 }
2713
2714 scmd->result = scsi_status;
2715 set_host_byte(scmd, host_byte);
2716}
2717
2718static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
2719{
2720 u8 scsi_status;
2721 u8 host_byte;
2722 struct scsi_cmnd *scmd;
2723 struct pqi_aio_error_info *error_info;
2724 size_t sense_data_length;
2725 int residual_count;
2726 int xfer_count;
2727 bool device_offline;
2728
2729 scmd = io_request->scmd;
2730 error_info = io_request->error_info;
2731 host_byte = DID_OK;
2732 sense_data_length = 0;
2733 device_offline = false;
2734
2735 switch (error_info->service_response) {
2736 case PQI_AIO_SERV_RESPONSE_COMPLETE:
2737 scsi_status = error_info->status;
2738 break;
2739 case PQI_AIO_SERV_RESPONSE_FAILURE:
2740 switch (error_info->status) {
2741 case PQI_AIO_STATUS_IO_ABORTED:
2742 scsi_status = SAM_STAT_TASK_ABORTED;
2743 break;
2744 case PQI_AIO_STATUS_UNDERRUN:
2745 scsi_status = SAM_STAT_GOOD;
2746 residual_count = get_unaligned_le32(
2747 &error_info->residual_count);
2748 scsi_set_resid(scmd, residual_count);
2749 xfer_count = scsi_bufflen(scmd) - residual_count;
2750 if (xfer_count < scmd->underflow)
2751 host_byte = DID_SOFT_ERROR;
2752 break;
2753 case PQI_AIO_STATUS_OVERRUN:
2754 scsi_status = SAM_STAT_GOOD;
2755 break;
2756 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
2757 pqi_aio_path_disabled(io_request);
2758 scsi_status = SAM_STAT_GOOD;
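			/*
			 * -EAGAIN flags the request for retry; with AIO now
			 * disabled on this device, the retry takes the
			 * normal RAID path.
			 */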
2759 io_request->status = -EAGAIN;
2760 break;
2761 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
2762 case PQI_AIO_STATUS_INVALID_DEVICE:
Kevin Barnett376fb882017-05-03 18:54:43 -05002763 if (!io_request->raid_bypass) {
2764 device_offline = true;
2765 pqi_take_device_offline(scmd->device, "AIO");
2766 host_byte = DID_NO_CONNECT;
2767 }
Kevin Barnett6c223762016-06-27 16:41:00 -05002768 scsi_status = SAM_STAT_CHECK_CONDITION;
2769 break;
2770 case PQI_AIO_STATUS_IO_ERROR:
2771 default:
2772 scsi_status = SAM_STAT_CHECK_CONDITION;
2773 break;
2774 }
2775 break;
2776 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
2777 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
2778 scsi_status = SAM_STAT_GOOD;
2779 break;
2780 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
2781 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
2782 default:
2783 scsi_status = SAM_STAT_CHECK_CONDITION;
2784 break;
2785 }
2786
2787 if (error_info->data_present) {
2788 sense_data_length =
2789 get_unaligned_le16(&error_info->data_length);
2790 if (sense_data_length) {
2791 if (sense_data_length > sizeof(error_info->data))
2792 sense_data_length = sizeof(error_info->data);
2793 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2794 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2795 memcpy(scmd->sense_buffer, error_info->data,
2796 sense_data_length);
2797 }
2798 }
2799
2800 if (device_offline && sense_data_length == 0)
2801 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
2802 0x3e, 0x1);
2803
2804 scmd->result = scsi_status;
2805 set_host_byte(scmd, host_byte);
2806}
2807
2808static void pqi_process_io_error(unsigned int iu_type,
2809 struct pqi_io_request *io_request)
2810{
2811 switch (iu_type) {
2812 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2813 pqi_process_raid_io_error(io_request);
2814 break;
2815 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2816 pqi_process_aio_io_error(io_request);
2817 break;
2818 }
2819}
2820
2821static int pqi_interpret_task_management_response(
2822 struct pqi_task_management_response *response)
2823{
2824 int rc;
2825
2826 switch (response->response_code) {
Kevin Barnettb17f0482016-08-31 14:54:17 -05002827 case SOP_TMF_COMPLETE:
2828 case SOP_TMF_FUNCTION_SUCCEEDED:
Kevin Barnett6c223762016-06-27 16:41:00 -05002829 rc = 0;
2830 break;
Mahesh Rajashekhara34063842018-12-07 16:28:16 -06002831 case SOP_TMF_REJECTED:
2832 rc = -EAGAIN;
2833 break;
Kevin Barnett6c223762016-06-27 16:41:00 -05002834 default:
2835 rc = -EIO;
2836 break;
2837 }
2838
2839 return rc;
2840}
2841
2842static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
2843 struct pqi_queue_group *queue_group)
2844{
2845 unsigned int num_responses;
2846 pqi_index_t oq_pi;
2847 pqi_index_t oq_ci;
2848 struct pqi_io_request *io_request;
2849 struct pqi_io_response *response;
2850 u16 request_id;
2851
2852 num_responses = 0;
2853 oq_ci = queue_group->oq_ci_copy;
2854
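	/*
	 * Drain the outbound queue: the controller advances the producer
	 * index (oq_pi); we consume at the consumer index (oq_ci) until the
	 * two meet, then write the updated CI back to the controller.
	 */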
2855 while (1) {
Kevin Barnettdac12fb2018-06-18 13:23:00 -05002856 oq_pi = readl(queue_group->oq_pi);
Kevin Barnett6c223762016-06-27 16:41:00 -05002857 if (oq_pi == oq_ci)
2858 break;
2859
2860 num_responses++;
2861 response = queue_group->oq_element_array +
2862 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
2863
2864 request_id = get_unaligned_le16(&response->request_id);
2865 WARN_ON(request_id >= ctrl_info->max_io_slots);
2866
2867 io_request = &ctrl_info->io_request_pool[request_id];
2868 WARN_ON(atomic_read(&io_request->refcount) == 0);
2869
2870 switch (response->header.iu_type) {
2871 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
2872 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
Kevin Barnett2ba55c92018-12-07 16:29:51 -06002873 if (io_request->scmd)
2874 io_request->scmd->result = 0;
2875 /* fall through */
Kevin Barnett6c223762016-06-27 16:41:00 -05002876 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
2877 break;
Kevin Barnettb212c252018-12-07 16:28:10 -06002878 case PQI_RESPONSE_IU_VENDOR_GENERAL:
2879 io_request->status =
2880 get_unaligned_le16(
2881 &((struct pqi_vendor_general_response *)
2882 response)->status);
2883 break;
Kevin Barnett6c223762016-06-27 16:41:00 -05002884 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
2885 io_request->status =
2886 pqi_interpret_task_management_response(
2887 (void *)response);
2888 break;
2889 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
2890 pqi_aio_path_disabled(io_request);
2891 io_request->status = -EAGAIN;
2892 break;
2893 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2894 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2895 io_request->error_info = ctrl_info->error_buffer +
2896 (get_unaligned_le16(&response->error_index) *
2897 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
2898 pqi_process_io_error(response->header.iu_type,
2899 io_request);
2900 break;
2901 default:
2902 dev_err(&ctrl_info->pci_dev->dev,
2903 "unexpected IU type: 0x%x\n",
2904 response->header.iu_type);
Kevin Barnett6c223762016-06-27 16:41:00 -05002905 break;
2906 }
2907
2908 io_request->io_complete_callback(io_request,
2909 io_request->context);
2910
2911 /*
2912 * Note that the I/O request structure CANNOT BE TOUCHED after
2913 * returning from the I/O completion callback!
2914 */
2915
2916 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
2917 }
2918
2919 if (num_responses) {
2920 queue_group->oq_ci_copy = oq_ci;
2921 writel(oq_ci, queue_group->oq_ci);
2922 }
2923
2924 return num_responses;
2925}
2926
2927static inline unsigned int pqi_num_elements_free(unsigned int pi,
Kevin Barnettdf7a1fc2016-08-31 14:54:59 -05002928 unsigned int ci, unsigned int elements_in_queue)
Kevin Barnett6c223762016-06-27 16:41:00 -05002929{
2930 unsigned int num_elements_used;
2931
2932 if (pi >= ci)
2933 num_elements_used = pi - ci;
2934 else
2935 num_elements_used = elements_in_queue - ci + pi;
2936
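	/*
	 * One element is always left unused so that a full queue can be told
	 * apart from an empty one.  For example, with 8 elements, pi = 2 and
	 * ci = 5: used = 8 - 5 + 2 = 5, so 2 elements are free.
	 */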
2937 return elements_in_queue - num_elements_used - 1;
2938}
2939
Kevin Barnett98f87662017-05-03 18:53:11 -05002940static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
Kevin Barnett6c223762016-06-27 16:41:00 -05002941 struct pqi_event_acknowledge_request *iu, size_t iu_length)
2942{
2943 pqi_index_t iq_pi;
2944 pqi_index_t iq_ci;
2945 unsigned long flags;
2946 void *next_element;
Kevin Barnett6c223762016-06-27 16:41:00 -05002947 struct pqi_queue_group *queue_group;
2948
2949 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
2950 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
2951
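	/*
	 * Spin until the RAID path inbound queue has a free element,
	 * re-reading the consumer index on each pass; bail out quietly if
	 * the controller goes offline while waiting.
	 */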
Kevin Barnett6c223762016-06-27 16:41:00 -05002952 while (1) {
2953 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
2954
2955 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
Kevin Barnettdac12fb2018-06-18 13:23:00 -05002956 iq_ci = readl(queue_group->iq_ci[RAID_PATH]);
Kevin Barnett6c223762016-06-27 16:41:00 -05002957
2958 if (pqi_num_elements_free(iq_pi, iq_ci,
2959 ctrl_info->num_elements_per_iq))
2960 break;
2961
2962 spin_unlock_irqrestore(
2963 &queue_group->submit_lock[RAID_PATH], flags);
2964
Kevin Barnett98f87662017-05-03 18:53:11 -05002965 if (pqi_ctrl_offline(ctrl_info))
Kevin Barnett6c223762016-06-27 16:41:00 -05002966 return;
Kevin Barnett6c223762016-06-27 16:41:00 -05002967 }
2968
2969 next_element = queue_group->iq_element_array[RAID_PATH] +
2970 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
2971
2972 memcpy(next_element, iu, iu_length);
2973
2974 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
Kevin Barnett6c223762016-06-27 16:41:00 -05002975 queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
2976
2977 /*
2978 * This write notifies the controller that an IU is available to be
2979 * processed.
2980 */
2981 writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
2982
2983 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
Kevin Barnett6c223762016-06-27 16:41:00 -05002984}
2985
2986static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
2987 struct pqi_event *event)
2988{
2989 struct pqi_event_acknowledge_request request;
2990
2991 memset(&request, 0, sizeof(request));
2992
2993 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
2994 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
2995 &request.header.iu_length);
2996 request.event_type = event->event_type;
2997 request.event_id = event->event_id;
2998 request.additional_event_id = event->additional_event_id;
2999
Kevin Barnett98f87662017-05-03 18:53:11 -05003000 pqi_send_event_ack(ctrl_info, &request, sizeof(request));
Kevin Barnett6c223762016-06-27 16:41:00 -05003001}
3002
3003static void pqi_event_worker(struct work_struct *work)
3004{
3005 unsigned int i;
3006 struct pqi_ctrl_info *ctrl_info;
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05003007 struct pqi_event *event;
Kevin Barnett6c223762016-06-27 16:41:00 -05003008
3009 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
3010
Kevin Barnett7561a7e2017-05-03 18:52:58 -05003011 pqi_ctrl_busy(ctrl_info);
3012 pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT);
Kevin Barnett5f310422017-05-03 18:54:55 -05003013 if (pqi_ctrl_offline(ctrl_info))
3014 goto out;
3015
3016 pqi_schedule_rescan_worker_delayed(ctrl_info);
Kevin Barnett7561a7e2017-05-03 18:52:58 -05003017
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05003018 event = ctrl_info->events;
Kevin Barnett6c223762016-06-27 16:41:00 -05003019 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05003020 if (event->pending) {
3021 event->pending = false;
3022 pqi_acknowledge_event(ctrl_info, event);
Kevin Barnett6c223762016-06-27 16:41:00 -05003023 }
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05003024 event++;
Kevin Barnett6c223762016-06-27 16:41:00 -05003025 }
3026
Kevin Barnett5f310422017-05-03 18:54:55 -05003027out:
Kevin Barnett7561a7e2017-05-03 18:52:58 -05003028 pqi_ctrl_unbusy(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05003029}
3030
Kevin Barnett98f87662017-05-03 18:53:11 -05003031#define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ)
Kevin Barnett6c223762016-06-27 16:41:00 -05003032
Kees Cook74a0f572017-10-11 16:27:10 -07003033static void pqi_heartbeat_timer_handler(struct timer_list *t)
Kevin Barnett6c223762016-06-27 16:41:00 -05003034{
3035 int num_interrupts;
Kevin Barnett98f87662017-05-03 18:53:11 -05003036 u32 heartbeat_count;
Kees Cook74a0f572017-10-11 16:27:10 -07003037 struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t,
3038 heartbeat_timer);
Kevin Barnett6c223762016-06-27 16:41:00 -05003039
Kevin Barnett98f87662017-05-03 18:53:11 -05003040 pqi_check_ctrl_health(ctrl_info);
3041 if (pqi_ctrl_offline(ctrl_info))
Kevin Barnett061ef062017-05-03 18:53:05 -05003042 return;
3043
Kevin Barnett6c223762016-06-27 16:41:00 -05003044 num_interrupts = atomic_read(&ctrl_info->num_interrupts);
Kevin Barnett98f87662017-05-03 18:53:11 -05003045 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05003046
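	/*
	 * Only a stalled heartbeat counter combined with stalled interrupt
	 * activity is treated as a dead controller; if interrupts are still
	 * arriving, just record the new counts and re-arm the timer.
	 */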
3047 if (num_interrupts == ctrl_info->previous_num_interrupts) {
Kevin Barnett98f87662017-05-03 18:53:11 -05003048 if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
3049 dev_err(&ctrl_info->pci_dev->dev,
3050 "no heartbeat detected - last heartbeat count: %u\n",
3051 heartbeat_count);
Kevin Barnett6c223762016-06-27 16:41:00 -05003052 pqi_take_ctrl_offline(ctrl_info);
3053 return;
3054 }
Kevin Barnett6c223762016-06-27 16:41:00 -05003055 } else {
Kevin Barnett98f87662017-05-03 18:53:11 -05003056 ctrl_info->previous_num_interrupts = num_interrupts;
Kevin Barnett6c223762016-06-27 16:41:00 -05003057 }
3058
Kevin Barnett98f87662017-05-03 18:53:11 -05003059 ctrl_info->previous_heartbeat_count = heartbeat_count;
Kevin Barnett6c223762016-06-27 16:41:00 -05003060 mod_timer(&ctrl_info->heartbeat_timer,
3061 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
3062}
3063
3064static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3065{
Kevin Barnett98f87662017-05-03 18:53:11 -05003066 if (!ctrl_info->heartbeat_counter)
3067 return;
3068
Kevin Barnett6c223762016-06-27 16:41:00 -05003069 ctrl_info->previous_num_interrupts =
3070 atomic_read(&ctrl_info->num_interrupts);
Kevin Barnett98f87662017-05-03 18:53:11 -05003071 ctrl_info->previous_heartbeat_count =
3072 pqi_read_heartbeat_counter(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05003073
Kevin Barnett6c223762016-06-27 16:41:00 -05003074 ctrl_info->heartbeat_timer.expires =
3075 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
Kevin Barnett061ef062017-05-03 18:53:05 -05003076 add_timer(&ctrl_info->heartbeat_timer);
Kevin Barnett6c223762016-06-27 16:41:00 -05003077}
3078
3079static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3080{
Kevin Barnett98f87662017-05-03 18:53:11 -05003081 del_timer_sync(&ctrl_info->heartbeat_timer);
Kevin Barnett6c223762016-06-27 16:41:00 -05003082}
3083
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05003084static inline int pqi_event_type_to_event_index(unsigned int event_type)
Kevin Barnett6c223762016-06-27 16:41:00 -05003085{
3086 int index;
3087
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05003088 for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
3089 if (event_type == pqi_supported_event_types[index])
3090 return index;
Kevin Barnett6c223762016-06-27 16:41:00 -05003091
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05003092 return -1;
3093}
3094
3095static inline bool pqi_is_supported_event(unsigned int event_type)
3096{
3097 return pqi_event_type_to_event_index(event_type) != -1;
Kevin Barnett6c223762016-06-27 16:41:00 -05003098}
3099
3100static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
3101{
3102 unsigned int num_events;
3103 pqi_index_t oq_pi;
3104 pqi_index_t oq_ci;
3105 struct pqi_event_queue *event_queue;
3106 struct pqi_event_response *response;
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05003107 struct pqi_event *event;
Kevin Barnett6c223762016-06-27 16:41:00 -05003108 int event_index;
3109
3110 event_queue = &ctrl_info->event_queue;
3111 num_events = 0;
Kevin Barnett6c223762016-06-27 16:41:00 -05003112 oq_ci = event_queue->oq_ci_copy;
3113
3114 while (1) {
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003115 oq_pi = readl(event_queue->oq_pi);
Kevin Barnett6c223762016-06-27 16:41:00 -05003116 if (oq_pi == oq_ci)
3117 break;
3118
3119 num_events++;
3120 response = event_queue->oq_element_array +
3121 (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
3122
3123 event_index =
3124 pqi_event_type_to_event_index(response->event_type);
3125
3126 if (event_index >= 0) {
3127 if (response->request_acknowlege) {
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05003128 event = &ctrl_info->events[event_index];
3129 event->pending = true;
3130 event->event_type = response->event_type;
3131 event->event_id = response->event_id;
3132 event->additional_event_id =
Kevin Barnett6c223762016-06-27 16:41:00 -05003133 response->additional_event_id;
Kevin Barnett6c223762016-06-27 16:41:00 -05003134 }
3135 }
3136
3137 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
3138 }
3139
3140 if (num_events) {
3141 event_queue->oq_ci_copy = oq_ci;
3142 writel(oq_ci, event_queue->oq_ci);
Kevin Barnett98f87662017-05-03 18:53:11 -05003143 schedule_work(&ctrl_info->event_work);
Kevin Barnett6c223762016-06-27 16:41:00 -05003144 }
3145
3146 return num_events;
3147}
3148
Kevin Barnett061ef062017-05-03 18:53:05 -05003149#define PQI_LEGACY_INTX_MASK 0x1
3150
3151static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info,
3152 bool enable_intx)
3153{
3154 u32 intx_mask;
3155 struct pqi_device_registers __iomem *pqi_registers;
3156 volatile void __iomem *register_addr;
3157
3158 pqi_registers = ctrl_info->pqi_registers;
3159
3160 if (enable_intx)
3161 register_addr = &pqi_registers->legacy_intx_mask_clear;
3162 else
3163 register_addr = &pqi_registers->legacy_intx_mask_set;
3164
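	/*
	 * The same mask bit is written either to the mask-clear register
	 * (to enable legacy INTx) or to the mask-set register (to disable it).
	 */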
3165 intx_mask = readl(register_addr);
3166 intx_mask |= PQI_LEGACY_INTX_MASK;
3167 writel(intx_mask, register_addr);
3168}
3169
3170static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
3171 enum pqi_irq_mode new_mode)
3172{
3173 switch (ctrl_info->irq_mode) {
3174 case IRQ_MODE_MSIX:
3175 switch (new_mode) {
3176 case IRQ_MODE_MSIX:
3177 break;
3178 case IRQ_MODE_INTX:
3179 pqi_configure_legacy_intx(ctrl_info, true);
Kevin Barnett061ef062017-05-03 18:53:05 -05003180 sis_enable_intx(ctrl_info);
3181 break;
3182 case IRQ_MODE_NONE:
Kevin Barnett061ef062017-05-03 18:53:05 -05003183 break;
3184 }
3185 break;
3186 case IRQ_MODE_INTX:
3187 switch (new_mode) {
3188 case IRQ_MODE_MSIX:
3189 pqi_configure_legacy_intx(ctrl_info, false);
Kevin Barnett061ef062017-05-03 18:53:05 -05003190 sis_enable_msix(ctrl_info);
3191 break;
3192 case IRQ_MODE_INTX:
3193 break;
3194 case IRQ_MODE_NONE:
3195 pqi_configure_legacy_intx(ctrl_info, false);
Kevin Barnett061ef062017-05-03 18:53:05 -05003196 break;
3197 }
3198 break;
3199 case IRQ_MODE_NONE:
3200 switch (new_mode) {
3201 case IRQ_MODE_MSIX:
3202 sis_enable_msix(ctrl_info);
3203 break;
3204 case IRQ_MODE_INTX:
3205 pqi_configure_legacy_intx(ctrl_info, true);
3206 sis_enable_intx(ctrl_info);
3207 break;
3208 case IRQ_MODE_NONE:
3209 break;
3210 }
3211 break;
3212 }
3213
3214 ctrl_info->irq_mode = new_mode;
3215}
3216
3217#define PQI_LEGACY_INTX_PENDING 0x1
3218
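/*
 * Determine whether an interrupt actually belongs to this controller.
 * MSI-X vectors are not shared, so they are always valid; legacy INTx may
 * be shared with other devices, so the pending bit in the
 * legacy_intx_status register is checked.
 */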
3219static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
3220{
3221 bool valid_irq;
3222 u32 intx_status;
3223
3224 switch (ctrl_info->irq_mode) {
3225 case IRQ_MODE_MSIX:
3226 valid_irq = true;
3227 break;
3228 case IRQ_MODE_INTX:
3229 intx_status =
3230 readl(&ctrl_info->pqi_registers->legacy_intx_status);
3231 if (intx_status & PQI_LEGACY_INTX_PENDING)
3232 valid_irq = true;
3233 else
3234 valid_irq = false;
3235 break;
3236 case IRQ_MODE_NONE:
3237 default:
3238 valid_irq = false;
3239 break;
3240 }
3241
3242 return valid_irq;
3243}
3244
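/*
 * Per-queue-group interrupt handler.  It processes I/O responses for the
 * queue group that raised the interrupt, processes controller events if
 * this vector also owns the event queue, and then restarts submission on
 * both the RAID and AIO paths in case requests were left queued while the
 * inbound queues were full.
 */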
Kevin Barnett6c223762016-06-27 16:41:00 -05003245static irqreturn_t pqi_irq_handler(int irq, void *data)
3246{
3247 struct pqi_ctrl_info *ctrl_info;
3248 struct pqi_queue_group *queue_group;
3249 unsigned int num_responses_handled;
3250
3251 queue_group = data;
3252 ctrl_info = queue_group->ctrl_info;
3253
Kevin Barnett061ef062017-05-03 18:53:05 -05003254 if (!pqi_is_valid_irq(ctrl_info))
Kevin Barnett6c223762016-06-27 16:41:00 -05003255 return IRQ_NONE;
3256
3257 num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
3258
3259 if (irq == ctrl_info->event_irq)
3260 num_responses_handled += pqi_process_event_intr(ctrl_info);
3261
3262 if (num_responses_handled)
3263 atomic_inc(&ctrl_info->num_interrupts);
3264
3265 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
3266 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
3267
3268 return IRQ_HANDLED;
3269}
3270
3271static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
3272{
Kevin Barnettd91d7822017-05-03 18:53:30 -05003273 struct pci_dev *pci_dev = ctrl_info->pci_dev;
Kevin Barnett6c223762016-06-27 16:41:00 -05003274 int i;
3275 int rc;
3276
Kevin Barnettd91d7822017-05-03 18:53:30 -05003277 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);
Kevin Barnett6c223762016-06-27 16:41:00 -05003278
3279 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
Kevin Barnettd91d7822017-05-03 18:53:30 -05003280 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
Christoph Hellwig52198222016-11-01 08:12:49 -06003281 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
Kevin Barnett6c223762016-06-27 16:41:00 -05003282 if (rc) {
Kevin Barnettd91d7822017-05-03 18:53:30 -05003283 dev_err(&pci_dev->dev,
Kevin Barnett6c223762016-06-27 16:41:00 -05003284 "irq %u init failed with error %d\n",
Kevin Barnettd91d7822017-05-03 18:53:30 -05003285 pci_irq_vector(pci_dev, i), rc);
Kevin Barnett6c223762016-06-27 16:41:00 -05003286 return rc;
3287 }
3288 ctrl_info->num_msix_vectors_initialized++;
3289 }
3290
3291 return 0;
3292}
3293
Kevin Barnett98bf0612017-05-03 18:52:28 -05003294static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
3295{
3296 int i;
3297
3298 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
3299 free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
3300 &ctrl_info->queue_groups[i]);
3301
3302 ctrl_info->num_msix_vectors_initialized = 0;
3303}
3304
Kevin Barnett6c223762016-06-27 16:41:00 -05003305static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3306{
Kevin Barnett98bf0612017-05-03 18:52:28 -05003307 int num_vectors_enabled;
Kevin Barnett6c223762016-06-27 16:41:00 -05003308
Kevin Barnett98bf0612017-05-03 18:52:28 -05003309 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
Christoph Hellwig52198222016-11-01 08:12:49 -06003310 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
3311 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
Kevin Barnett98bf0612017-05-03 18:52:28 -05003312 if (num_vectors_enabled < 0) {
Kevin Barnett6c223762016-06-27 16:41:00 -05003313 dev_err(&ctrl_info->pci_dev->dev,
Kevin Barnett98bf0612017-05-03 18:52:28 -05003314 "MSI-X init failed with error %d\n",
3315 num_vectors_enabled);
3316 return num_vectors_enabled;
Kevin Barnett6c223762016-06-27 16:41:00 -05003317 }
3318
Kevin Barnett98bf0612017-05-03 18:52:28 -05003319 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
Kevin Barnett061ef062017-05-03 18:53:05 -05003320 ctrl_info->irq_mode = IRQ_MODE_MSIX;
Kevin Barnett6c223762016-06-27 16:41:00 -05003321 return 0;
3322}
3323
Kevin Barnett98bf0612017-05-03 18:52:28 -05003324static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3325{
3326 if (ctrl_info->num_msix_vectors_enabled) {
3327 pci_free_irq_vectors(ctrl_info->pci_dev);
3328 ctrl_info->num_msix_vectors_enabled = 0;
3329 }
3330}
3331
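/*
 * Allocate one DMA-coherent region that holds all operational queue
 * state: the inbound (RAID and AIO) element arrays for every queue group,
 * the outbound element arrays, the event queue elements, and the queue
 * indexes (iq_ci, oq_pi) written by the controller.  The required size is
 * computed first by walking the layout with a NULL base pointer so that
 * every piece gets the alignment it needs; the region is then carved up
 * in the same order.
 */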
Kevin Barnett6c223762016-06-27 16:41:00 -05003332static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
3333{
3334 unsigned int i;
3335 size_t alloc_length;
3336 size_t element_array_length_per_iq;
3337 size_t element_array_length_per_oq;
3338 void *element_array;
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003339 void __iomem *next_queue_index;
Kevin Barnett6c223762016-06-27 16:41:00 -05003340 void *aligned_pointer;
3341 unsigned int num_inbound_queues;
3342 unsigned int num_outbound_queues;
3343 unsigned int num_queue_indexes;
3344 struct pqi_queue_group *queue_group;
3345
3346 element_array_length_per_iq =
3347 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
3348 ctrl_info->num_elements_per_iq;
3349 element_array_length_per_oq =
3350 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
3351 ctrl_info->num_elements_per_oq;
3352 num_inbound_queues = ctrl_info->num_queue_groups * 2;
3353 num_outbound_queues = ctrl_info->num_queue_groups;
3354 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
3355
3356 aligned_pointer = NULL;
3357
3358 for (i = 0; i < num_inbound_queues; i++) {
3359 aligned_pointer = PTR_ALIGN(aligned_pointer,
3360 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3361 aligned_pointer += element_array_length_per_iq;
3362 }
3363
3364 for (i = 0; i < num_outbound_queues; i++) {
3365 aligned_pointer = PTR_ALIGN(aligned_pointer,
3366 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3367 aligned_pointer += element_array_length_per_oq;
3368 }
3369
3370 aligned_pointer = PTR_ALIGN(aligned_pointer,
3371 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3372 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3373 PQI_EVENT_OQ_ELEMENT_LENGTH;
3374
3375 for (i = 0; i < num_queue_indexes; i++) {
3376 aligned_pointer = PTR_ALIGN(aligned_pointer,
3377 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3378 aligned_pointer += sizeof(pqi_index_t);
3379 }
3380
3381 alloc_length = (size_t)aligned_pointer +
3382 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3383
Kevin Barnette1d213b2017-05-03 18:53:18 -05003384 alloc_length += PQI_EXTRA_SGL_MEMORY;
3385
Kevin Barnett6c223762016-06-27 16:41:00 -05003386 ctrl_info->queue_memory_base =
3387 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3388 alloc_length,
3389 &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);
3390
Kevin Barnettd87d5472017-05-03 18:54:00 -05003391 if (!ctrl_info->queue_memory_base)
Kevin Barnett6c223762016-06-27 16:41:00 -05003392 return -ENOMEM;
Kevin Barnett6c223762016-06-27 16:41:00 -05003393
3394 ctrl_info->queue_memory_length = alloc_length;
3395
3396 element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
3397 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3398
3399 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3400 queue_group = &ctrl_info->queue_groups[i];
3401 queue_group->iq_element_array[RAID_PATH] = element_array;
3402 queue_group->iq_element_array_bus_addr[RAID_PATH] =
3403 ctrl_info->queue_memory_base_dma_handle +
3404 (element_array - ctrl_info->queue_memory_base);
3405 element_array += element_array_length_per_iq;
3406 element_array = PTR_ALIGN(element_array,
3407 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3408 queue_group->iq_element_array[AIO_PATH] = element_array;
3409 queue_group->iq_element_array_bus_addr[AIO_PATH] =
3410 ctrl_info->queue_memory_base_dma_handle +
3411 (element_array - ctrl_info->queue_memory_base);
3412 element_array += element_array_length_per_iq;
3413 element_array = PTR_ALIGN(element_array,
3414 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3415 }
3416
3417 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3418 queue_group = &ctrl_info->queue_groups[i];
3419 queue_group->oq_element_array = element_array;
3420 queue_group->oq_element_array_bus_addr =
3421 ctrl_info->queue_memory_base_dma_handle +
3422 (element_array - ctrl_info->queue_memory_base);
3423 element_array += element_array_length_per_oq;
3424 element_array = PTR_ALIGN(element_array,
3425 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3426 }
3427
3428 ctrl_info->event_queue.oq_element_array = element_array;
3429 ctrl_info->event_queue.oq_element_array_bus_addr =
3430 ctrl_info->queue_memory_base_dma_handle +
3431 (element_array - ctrl_info->queue_memory_base);
3432 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3433 PQI_EVENT_OQ_ELEMENT_LENGTH;
3434
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003435 next_queue_index = (void __iomem *)PTR_ALIGN(element_array,
Kevin Barnett6c223762016-06-27 16:41:00 -05003436 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3437
3438 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3439 queue_group = &ctrl_info->queue_groups[i];
3440 queue_group->iq_ci[RAID_PATH] = next_queue_index;
3441 queue_group->iq_ci_bus_addr[RAID_PATH] =
3442 ctrl_info->queue_memory_base_dma_handle +
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003443 (next_queue_index -
3444 (void __iomem *)ctrl_info->queue_memory_base);
Kevin Barnett6c223762016-06-27 16:41:00 -05003445 next_queue_index += sizeof(pqi_index_t);
3446 next_queue_index = PTR_ALIGN(next_queue_index,
3447 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3448 queue_group->iq_ci[AIO_PATH] = next_queue_index;
3449 queue_group->iq_ci_bus_addr[AIO_PATH] =
3450 ctrl_info->queue_memory_base_dma_handle +
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003451 (next_queue_index -
3452 (void __iomem *)ctrl_info->queue_memory_base);
Kevin Barnett6c223762016-06-27 16:41:00 -05003453 next_queue_index += sizeof(pqi_index_t);
3454 next_queue_index = PTR_ALIGN(next_queue_index,
3455 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3456 queue_group->oq_pi = next_queue_index;
3457 queue_group->oq_pi_bus_addr =
3458 ctrl_info->queue_memory_base_dma_handle +
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003459 (next_queue_index -
3460 (void __iomem *)ctrl_info->queue_memory_base);
Kevin Barnett6c223762016-06-27 16:41:00 -05003461 next_queue_index += sizeof(pqi_index_t);
3462 next_queue_index = PTR_ALIGN(next_queue_index,
3463 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3464 }
3465
3466 ctrl_info->event_queue.oq_pi = next_queue_index;
3467 ctrl_info->event_queue.oq_pi_bus_addr =
3468 ctrl_info->queue_memory_base_dma_handle +
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003469 (next_queue_index -
3470 (void __iomem *)ctrl_info->queue_memory_base);
Kevin Barnett6c223762016-06-27 16:41:00 -05003471
3472 return 0;
3473}
3474
3475static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
3476{
3477 unsigned int i;
3478 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3479 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3480
3481 /*
3482 * Initialize the backpointers to the controller structure in
3483 * each operational queue group structure.
3484 */
3485 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3486 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
3487
3488 /*
3489 * Assign IDs to all operational queues. Note that the IDs
3490 * assigned to operational IQs are independent of the IDs
3491 * assigned to operational OQs.
3492 */
3493 ctrl_info->event_queue.oq_id = next_oq_id++;
3494 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3495 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
3496 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
3497 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
3498 }
3499
3500 /*
3501 * Assign MSI-X table entry indexes to all queues. Note that the
3502 * interrupt for the event queue is shared with the first queue group.
3503 */
3504 ctrl_info->event_queue.int_msg_num = 0;
3505 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3506 ctrl_info->queue_groups[i].int_msg_num = i;
3507
3508 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3509 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
3510 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
3511 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
3512 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
3513 }
3514}
3515
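/*
 * Allocate the DMA-coherent memory backing the admin queue pair and
 * compute the bus addresses of the element arrays and queue indexes that
 * will be handed to the controller when the admin queues are created.
 */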
3516static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
3517{
3518 size_t alloc_length;
3519 struct pqi_admin_queues_aligned *admin_queues_aligned;
3520 struct pqi_admin_queues *admin_queues;
3521
3522 alloc_length = sizeof(struct pqi_admin_queues_aligned) +
3523 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3524
3525 ctrl_info->admin_queue_memory_base =
3526 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3527 alloc_length,
3528 &ctrl_info->admin_queue_memory_base_dma_handle,
3529 GFP_KERNEL);
3530
3531 if (!ctrl_info->admin_queue_memory_base)
3532 return -ENOMEM;
3533
3534 ctrl_info->admin_queue_memory_length = alloc_length;
3535
3536 admin_queues = &ctrl_info->admin_queues;
3537 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
3538 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3539 admin_queues->iq_element_array =
3540 &admin_queues_aligned->iq_element_array;
3541 admin_queues->oq_element_array =
3542 &admin_queues_aligned->oq_element_array;
3543 admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003544 admin_queues->oq_pi =
3545 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;
Kevin Barnett6c223762016-06-27 16:41:00 -05003546
3547 admin_queues->iq_element_array_bus_addr =
3548 ctrl_info->admin_queue_memory_base_dma_handle +
3549 (admin_queues->iq_element_array -
3550 ctrl_info->admin_queue_memory_base);
3551 admin_queues->oq_element_array_bus_addr =
3552 ctrl_info->admin_queue_memory_base_dma_handle +
3553 (admin_queues->oq_element_array -
3554 ctrl_info->admin_queue_memory_base);
3555 admin_queues->iq_ci_bus_addr =
3556 ctrl_info->admin_queue_memory_base_dma_handle +
3557 ((void *)admin_queues->iq_ci -
3558 ctrl_info->admin_queue_memory_base);
3559 admin_queues->oq_pi_bus_addr =
3560 ctrl_info->admin_queue_memory_base_dma_handle +
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003561 ((void __iomem *)admin_queues->oq_pi -
3562 (void __iomem *)ctrl_info->admin_queue_memory_base);
Kevin Barnett6c223762016-06-27 16:41:00 -05003563
3564 return 0;
3565}
3566
3567#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ
3568#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
3569
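/*
 * Create the admin queue pair: program the element array and index
 * addresses into the PQI device registers, issue the "create admin queue
 * pair" function code, and poll function_and_status_code until the
 * controller reports idle (or the one-second timeout expires).  Only
 * after the command completes do admin_iq_pi_offset and
 * admin_oq_ci_offset hold valid doorbell offsets.
 */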
3570static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
3571{
3572 struct pqi_device_registers __iomem *pqi_registers;
3573 struct pqi_admin_queues *admin_queues;
3574 unsigned long timeout;
3575 u8 status;
3576 u32 reg;
3577
3578 pqi_registers = ctrl_info->pqi_registers;
3579 admin_queues = &ctrl_info->admin_queues;
3580
3581 writeq((u64)admin_queues->iq_element_array_bus_addr,
3582 &pqi_registers->admin_iq_element_array_addr);
3583 writeq((u64)admin_queues->oq_element_array_bus_addr,
3584 &pqi_registers->admin_oq_element_array_addr);
3585 writeq((u64)admin_queues->iq_ci_bus_addr,
3586 &pqi_registers->admin_iq_ci_addr);
3587 writeq((u64)admin_queues->oq_pi_bus_addr,
3588 &pqi_registers->admin_oq_pi_addr);
3589
3590 reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
3591 (PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 |
3592 (admin_queues->int_msg_num << 16);
3593 writel(reg, &pqi_registers->admin_iq_num_elements);
3594 writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
3595 &pqi_registers->function_and_status_code);
3596
3597 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
3598 while (1) {
3599 status = readb(&pqi_registers->function_and_status_code);
3600 if (status == PQI_STATUS_IDLE)
3601 break;
3602 if (time_after(jiffies, timeout))
3603 return -ETIMEDOUT;
3604 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
3605 }
3606
3607 /*
3608 * The offset registers are not initialized to the correct
3609 * offsets until *after* the create admin queue pair command
3610 * completes successfully.
3611 */
3612 admin_queues->iq_pi = ctrl_info->iomem_base +
3613 PQI_DEVICE_REGISTERS_OFFSET +
3614 readq(&pqi_registers->admin_iq_pi_offset);
3615 admin_queues->oq_ci = ctrl_info->iomem_base +
3616 PQI_DEVICE_REGISTERS_OFFSET +
3617 readq(&pqi_registers->admin_oq_ci_offset);
3618
3619 return 0;
3620}
3621
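/*
 * Copy an admin request into the next free element of the admin inbound
 * queue and ring the producer-index doorbell to notify the controller.
 */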
3622static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
3623 struct pqi_general_admin_request *request)
3624{
3625 struct pqi_admin_queues *admin_queues;
3626 void *next_element;
3627 pqi_index_t iq_pi;
3628
3629 admin_queues = &ctrl_info->admin_queues;
3630 iq_pi = admin_queues->iq_pi_copy;
3631
3632 next_element = admin_queues->iq_element_array +
3633 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
3634
3635 memcpy(next_element, request, sizeof(*request));
3636
3637 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
3638 admin_queues->iq_pi_copy = iq_pi;
3639
3640 /*
3641 * This write notifies the controller that an IU is available to be
3642 * processed.
3643 */
3644 writel(iq_pi, admin_queues->iq_pi);
3645}
3646
Kevin Barnett13bede62017-05-03 18:55:13 -05003647#define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60
3648
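/*
 * Poll the admin outbound queue for a response, giving up if the
 * controller firmware stops running or the 60-second timeout expires.
 * The consumer index is advanced and written back once the response has
 * been copied out.
 */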
Kevin Barnett6c223762016-06-27 16:41:00 -05003649static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
3650 struct pqi_general_admin_response *response)
3651{
3652 struct pqi_admin_queues *admin_queues;
3653 pqi_index_t oq_pi;
3654 pqi_index_t oq_ci;
3655 unsigned long timeout;
3656
3657 admin_queues = &ctrl_info->admin_queues;
3658 oq_ci = admin_queues->oq_ci_copy;
3659
Kevin Barnett13bede62017-05-03 18:55:13 -05003660 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies;
Kevin Barnett6c223762016-06-27 16:41:00 -05003661
3662 while (1) {
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003663 oq_pi = readl(admin_queues->oq_pi);
Kevin Barnett6c223762016-06-27 16:41:00 -05003664 if (oq_pi != oq_ci)
3665 break;
3666 if (time_after(jiffies, timeout)) {
3667 dev_err(&ctrl_info->pci_dev->dev,
3668 "timed out waiting for admin response\n");
3669 return -ETIMEDOUT;
3670 }
Kevin Barnett13bede62017-05-03 18:55:13 -05003671 if (!sis_is_firmware_running(ctrl_info))
3672 return -ENXIO;
Kevin Barnett6c223762016-06-27 16:41:00 -05003673 usleep_range(1000, 2000);
3674 }
3675
3676 memcpy(response, admin_queues->oq_element_array +
3677 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
3678
3679 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
3680 admin_queues->oq_ci_copy = oq_ci;
3681 writel(oq_ci, admin_queues->oq_ci);
3682
3683 return 0;
3684}
3685
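/*
 * Submission path for an operational inbound queue.  A new request (if
 * any) is appended to the queue group's software request list; the list
 * is then drained into the circular inbound queue for as long as there is
 * room.  With producer index pi and consumer index ci, the number of
 * elements in use is (pi - ci) modulo the queue depth, and one element is
 * always left unused so that pi == ci unambiguously means "empty" --
 * that is what pqi_num_elements_free() accounts for.  IUs larger than one
 * element are spanned across consecutive elements, wrapping around the
 * end of the element array when necessary.  If anything was queued, the
 * new producer index is written to the doorbell.
 */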
3686static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
3687 struct pqi_queue_group *queue_group, enum pqi_io_path path,
3688 struct pqi_io_request *io_request)
3689{
3690 struct pqi_io_request *next;
3691 void *next_element;
3692 pqi_index_t iq_pi;
3693 pqi_index_t iq_ci;
3694 size_t iu_length;
3695 unsigned long flags;
3696 unsigned int num_elements_needed;
3697 unsigned int num_elements_to_end_of_queue;
3698 size_t copy_count;
3699 struct pqi_iu_header *request;
3700
3701 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
3702
Kevin Barnett376fb882017-05-03 18:54:43 -05003703 if (io_request) {
3704 io_request->queue_group = queue_group;
Kevin Barnett6c223762016-06-27 16:41:00 -05003705 list_add_tail(&io_request->request_list_entry,
3706 &queue_group->request_list[path]);
Kevin Barnett376fb882017-05-03 18:54:43 -05003707 }
Kevin Barnett6c223762016-06-27 16:41:00 -05003708
3709 iq_pi = queue_group->iq_pi_copy[path];
3710
3711 list_for_each_entry_safe(io_request, next,
3712 &queue_group->request_list[path], request_list_entry) {
3713
3714 request = io_request->iu;
3715
3716 iu_length = get_unaligned_le16(&request->iu_length) +
3717 PQI_REQUEST_HEADER_LENGTH;
3718 num_elements_needed =
3719 DIV_ROUND_UP(iu_length,
3720 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3721
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003722 iq_ci = readl(queue_group->iq_ci[path]);
Kevin Barnett6c223762016-06-27 16:41:00 -05003723
3724 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
3725 ctrl_info->num_elements_per_iq))
3726 break;
3727
3728 put_unaligned_le16(queue_group->oq_id,
3729 &request->response_queue_id);
3730
3731 next_element = queue_group->iq_element_array[path] +
3732 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3733
3734 num_elements_to_end_of_queue =
3735 ctrl_info->num_elements_per_iq - iq_pi;
3736
3737 if (num_elements_needed <= num_elements_to_end_of_queue) {
3738 memcpy(next_element, request, iu_length);
3739 } else {
3740 copy_count = num_elements_to_end_of_queue *
3741 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
3742 memcpy(next_element, request, copy_count);
3743 memcpy(queue_group->iq_element_array[path],
3744 (u8 *)request + copy_count,
3745 iu_length - copy_count);
3746 }
3747
3748 iq_pi = (iq_pi + num_elements_needed) %
3749 ctrl_info->num_elements_per_iq;
3750
3751 list_del(&io_request->request_list_entry);
3752 }
3753
3754 if (iq_pi != queue_group->iq_pi_copy[path]) {
3755 queue_group->iq_pi_copy[path] = iq_pi;
3756 /*
3757 * This write notifies the controller that one or more IUs are
3758 * available to be processed.
3759 */
3760 writel(iq_pi, queue_group->iq_pi[path]);
3761 }
3762
3763 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
3764}
3765
Kevin Barnett1f37e992017-05-03 18:53:24 -05003766#define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10
3767
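/*
 * Wait for an I/O completion, waking periodically to check controller
 * health so that a dead controller cannot leave the caller blocked
 * forever.
 */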
3768static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
3769 struct completion *wait)
3770{
3771 int rc;
Kevin Barnett1f37e992017-05-03 18:53:24 -05003772
3773 while (1) {
3774 if (wait_for_completion_io_timeout(wait,
3775 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) {
3776 rc = 0;
3777 break;
3778 }
3779
3780 pqi_check_ctrl_health(ctrl_info);
3781 if (pqi_ctrl_offline(ctrl_info)) {
3782 rc = -ENXIO;
3783 break;
3784 }
Kevin Barnett1f37e992017-05-03 18:53:24 -05003785 }
3786
3787 return rc;
3788}
3789
Kevin Barnett6c223762016-06-27 16:41:00 -05003790static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
3791 void *context)
3792{
3793 struct completion *waiting = context;
3794
3795 complete(waiting);
3796}
3797
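/*
 * Collapse firmware error information for a synchronous RAID request into
 * a return code: GOOD status is success, a data underflow with GOOD or
 * CHECK CONDITION status is also treated as success, an aborted transfer
 * is reported as PQI_CMD_STATUS_ABORTED, and anything else as -EIO.
 */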
Kevin Barnett26b390a2018-06-18 13:22:48 -05003798static int pqi_process_raid_io_error_synchronous(struct pqi_raid_error_info
3799 *error_info)
3800{
3801 int rc = -EIO;
3802
3803 switch (error_info->data_out_result) {
3804 case PQI_DATA_IN_OUT_GOOD:
3805 if (error_info->status == SAM_STAT_GOOD)
3806 rc = 0;
3807 break;
3808 case PQI_DATA_IN_OUT_UNDERFLOW:
3809 if (error_info->status == SAM_STAT_GOOD ||
3810 error_info->status == SAM_STAT_CHECK_CONDITION)
3811 rc = 0;
3812 break;
3813 case PQI_DATA_IN_OUT_ABORTED:
3814 rc = PQI_CMD_STATUS_ABORTED;
3815 break;
3816 }
3817
3818 return rc;
3819}
3820
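/*
 * Issue a RAID path request and wait for it to complete.  Concurrency is
 * throttled by sync_request_sem; the remaining timeout is reduced by any
 * time spent blocked on the semaphore or waiting for the controller to
 * become unblocked.  On completion, any error information supplied by the
 * firmware is either copied to the caller's buffer or folded into the
 * return code.
 *
 * Typical usage (illustrative sketch only -- see pqi_configure_events()
 * below for a real caller):
 *
 *	rc = pqi_submit_raid_request_synchronous(ctrl_info,
 *		&request.header, 0, NULL, NO_TIMEOUT);
 */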
Kevin Barnett6c223762016-06-27 16:41:00 -05003821static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
3822 struct pqi_iu_header *request, unsigned int flags,
3823 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
3824{
Kevin Barnett957c5ab2018-06-18 13:22:42 -05003825 int rc = 0;
Kevin Barnett6c223762016-06-27 16:41:00 -05003826 struct pqi_io_request *io_request;
3827 unsigned long start_jiffies;
3828 unsigned long msecs_blocked;
3829 size_t iu_length;
Kevin Barnett957c5ab2018-06-18 13:22:42 -05003830 DECLARE_COMPLETION_ONSTACK(wait);
Kevin Barnett6c223762016-06-27 16:41:00 -05003831
3832 /*
3833 * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value
3834 * are mutually exclusive.
3835 */
3836
3837 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
3838 if (down_interruptible(&ctrl_info->sync_request_sem))
3839 return -ERESTARTSYS;
3840 } else {
3841 if (timeout_msecs == NO_TIMEOUT) {
3842 down(&ctrl_info->sync_request_sem);
3843 } else {
3844 start_jiffies = jiffies;
3845 if (down_timeout(&ctrl_info->sync_request_sem,
3846 msecs_to_jiffies(timeout_msecs)))
3847 return -ETIMEDOUT;
3848 msecs_blocked =
3849 jiffies_to_msecs(jiffies - start_jiffies);
3850 if (msecs_blocked >= timeout_msecs)
3851 return -ETIMEDOUT;
3852 timeout_msecs -= msecs_blocked;
3853 }
3854 }
3855
Kevin Barnett7561a7e2017-05-03 18:52:58 -05003856 pqi_ctrl_busy(ctrl_info);
3857 timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs);
3858 if (timeout_msecs == 0) {
Kevin Barnett957c5ab2018-06-18 13:22:42 -05003859 pqi_ctrl_unbusy(ctrl_info);
Kevin Barnett7561a7e2017-05-03 18:52:58 -05003860 rc = -ETIMEDOUT;
3861 goto out;
3862 }
3863
Kevin Barnett376fb882017-05-03 18:54:43 -05003864 if (pqi_ctrl_offline(ctrl_info)) {
Kevin Barnett957c5ab2018-06-18 13:22:42 -05003865 pqi_ctrl_unbusy(ctrl_info);
Kevin Barnett376fb882017-05-03 18:54:43 -05003866 rc = -ENXIO;
3867 goto out;
3868 }
3869
Kevin Barnett6c223762016-06-27 16:41:00 -05003870 io_request = pqi_alloc_io_request(ctrl_info);
3871
3872 put_unaligned_le16(io_request->index,
3873 &(((struct pqi_raid_path_request *)request)->request_id));
3874
3875 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
3876 ((struct pqi_raid_path_request *)request)->error_index =
3877 ((struct pqi_raid_path_request *)request)->request_id;
3878
3879 iu_length = get_unaligned_le16(&request->iu_length) +
3880 PQI_REQUEST_HEADER_LENGTH;
3881 memcpy(io_request->iu, request, iu_length);
3882
Kevin Barnett957c5ab2018-06-18 13:22:42 -05003883 io_request->io_complete_callback = pqi_raid_synchronous_complete;
3884 io_request->context = &wait;
3885
3886 pqi_start_io(ctrl_info,
3887 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
3888 io_request);
3889
3890 pqi_ctrl_unbusy(ctrl_info);
3891
3892 if (timeout_msecs == NO_TIMEOUT) {
3893 pqi_wait_for_completion_io(ctrl_info, &wait);
3894 } else {
3895 if (!wait_for_completion_io_timeout(&wait,
3896 msecs_to_jiffies(timeout_msecs))) {
3897 dev_warn(&ctrl_info->pci_dev->dev,
3898 "command timed out\n");
3899 rc = -ETIMEDOUT;
3900 }
3901 }
Kevin Barnett6c223762016-06-27 16:41:00 -05003902
3903 if (error_info) {
3904 if (io_request->error_info)
3905 memcpy(error_info, io_request->error_info,
3906 sizeof(*error_info));
3907 else
3908 memset(error_info, 0, sizeof(*error_info));
3909 } else if (rc == 0 && io_request->error_info) {
Kevin Barnett26b390a2018-06-18 13:22:48 -05003910 rc = pqi_process_raid_io_error_synchronous(
3911 io_request->error_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05003912 }
3913
3914 pqi_free_io_request(io_request);
3915
Kevin Barnett7561a7e2017-05-03 18:52:58 -05003916out:
Kevin Barnett6c223762016-06-27 16:41:00 -05003917 up(&ctrl_info->sync_request_sem);
3918
3919 return rc;
3920}
3921
3922static int pqi_validate_admin_response(
3923 struct pqi_general_admin_response *response, u8 expected_function_code)
3924{
3925 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
3926 return -EINVAL;
3927
3928 if (get_unaligned_le16(&response->header.iu_length) !=
3929 PQI_GENERAL_ADMIN_IU_LENGTH)
3930 return -EINVAL;
3931
3932 if (response->function_code != expected_function_code)
3933 return -EINVAL;
3934
3935 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
3936 return -EINVAL;
3937
3938 return 0;
3939}
3940
3941static int pqi_submit_admin_request_synchronous(
3942 struct pqi_ctrl_info *ctrl_info,
3943 struct pqi_general_admin_request *request,
3944 struct pqi_general_admin_response *response)
3945{
3946 int rc;
3947
3948 pqi_submit_admin_request(ctrl_info, request);
3949
3950 rc = pqi_poll_for_admin_response(ctrl_info, response);
3951
3952 if (rc == 0)
3953 rc = pqi_validate_admin_response(response,
3954 request->function_code);
3955
3956 return rc;
3957}
3958
3959static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
3960{
3961 int rc;
3962 struct pqi_general_admin_request request;
3963 struct pqi_general_admin_response response;
3964 struct pqi_device_capability *capability;
3965 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
3966
3967 capability = kmalloc(sizeof(*capability), GFP_KERNEL);
3968 if (!capability)
3969 return -ENOMEM;
3970
3971 memset(&request, 0, sizeof(request));
3972
3973 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3974 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3975 &request.header.iu_length);
3976 request.function_code =
3977 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
3978 put_unaligned_le32(sizeof(*capability),
3979 &request.data.report_device_capability.buffer_length);
3980
3981 rc = pqi_map_single(ctrl_info->pci_dev,
3982 &request.data.report_device_capability.sg_descriptor,
3983 capability, sizeof(*capability),
Christoph Hellwig6917a9c2018-10-11 09:47:59 +02003984 DMA_FROM_DEVICE);
Kevin Barnett6c223762016-06-27 16:41:00 -05003985 if (rc)
3986 goto out;
3987
3988 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3989 &response);
3990
3991 pqi_pci_unmap(ctrl_info->pci_dev,
3992 &request.data.report_device_capability.sg_descriptor, 1,
Christoph Hellwig6917a9c2018-10-11 09:47:59 +02003993 DMA_FROM_DEVICE);
Kevin Barnett6c223762016-06-27 16:41:00 -05003994
3995 if (rc)
3996 goto out;
3997
3998 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
3999 rc = -EIO;
4000 goto out;
4001 }
4002
4003 ctrl_info->max_inbound_queues =
4004 get_unaligned_le16(&capability->max_inbound_queues);
4005 ctrl_info->max_elements_per_iq =
4006 get_unaligned_le16(&capability->max_elements_per_iq);
4007 ctrl_info->max_iq_element_length =
4008 get_unaligned_le16(&capability->max_iq_element_length)
4009 * 16;
4010 ctrl_info->max_outbound_queues =
4011 get_unaligned_le16(&capability->max_outbound_queues);
4012 ctrl_info->max_elements_per_oq =
4013 get_unaligned_le16(&capability->max_elements_per_oq);
4014 ctrl_info->max_oq_element_length =
4015 get_unaligned_le16(&capability->max_oq_element_length)
4016 * 16;
4017
4018 sop_iu_layer_descriptor =
4019 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
4020
4021 ctrl_info->max_inbound_iu_length_per_firmware =
4022 get_unaligned_le16(
4023 &sop_iu_layer_descriptor->max_inbound_iu_length);
4024 ctrl_info->inbound_spanning_supported =
4025 sop_iu_layer_descriptor->inbound_spanning_supported;
4026 ctrl_info->outbound_spanning_supported =
4027 sop_iu_layer_descriptor->outbound_spanning_supported;
4028
4029out:
4030 kfree(capability);
4031
4032 return rc;
4033}
4034
4035static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
4036{
4037 if (ctrl_info->max_iq_element_length <
4038 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4039 dev_err(&ctrl_info->pci_dev->dev,
4040 "max. inbound queue element length of %d is less than the required length of %d\n",
4041 ctrl_info->max_iq_element_length,
4042 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4043 return -EINVAL;
4044 }
4045
4046 if (ctrl_info->max_oq_element_length <
4047 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
4048 dev_err(&ctrl_info->pci_dev->dev,
4049 "max. outbound queue element length of %d is less than the required length of %d\n",
4050 ctrl_info->max_oq_element_length,
4051 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
4052 return -EINVAL;
4053 }
4054
4055 if (ctrl_info->max_inbound_iu_length_per_firmware <
4056 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4057 dev_err(&ctrl_info->pci_dev->dev,
4058 "max. inbound IU length of %u is less than the min. required length of %d\n",
4059 ctrl_info->max_inbound_iu_length_per_firmware,
4060 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4061 return -EINVAL;
4062 }
4063
Kevin Barnett77668f42016-08-31 14:54:23 -05004064 if (!ctrl_info->inbound_spanning_supported) {
4065 dev_err(&ctrl_info->pci_dev->dev,
4066 "the controller does not support inbound spanning\n");
4067 return -EINVAL;
4068 }
4069
4070 if (ctrl_info->outbound_spanning_supported) {
4071 dev_err(&ctrl_info->pci_dev->dev,
4072 "the controller supports outbound spanning but this driver does not\n");
4073 return -EINVAL;
4074 }
4075
Kevin Barnett6c223762016-06-27 16:41:00 -05004076 return 0;
4077}
4078
Kevin Barnett6c223762016-06-27 16:41:00 -05004079static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
4080{
4081 int rc;
4082 struct pqi_event_queue *event_queue;
4083 struct pqi_general_admin_request request;
4084 struct pqi_general_admin_response response;
4085
4086 event_queue = &ctrl_info->event_queue;
4087
4088 /*
4089	 * Create OQ (Outbound Queue - device to host queue) dedicated
4090	 * to event notifications.
4091 */
4092 memset(&request, 0, sizeof(request));
4093 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4094 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4095 &request.header.iu_length);
4096 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4097 put_unaligned_le16(event_queue->oq_id,
4098 &request.data.create_operational_oq.queue_id);
4099 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
4100 &request.data.create_operational_oq.element_array_addr);
4101 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
4102 &request.data.create_operational_oq.pi_addr);
4103 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
4104 &request.data.create_operational_oq.num_elements);
4105 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
4106 &request.data.create_operational_oq.element_length);
4107 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4108 put_unaligned_le16(event_queue->int_msg_num,
4109 &request.data.create_operational_oq.int_msg_num);
4110
4111 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4112 &response);
4113 if (rc)
4114 return rc;
4115
4116 event_queue->oq_ci = ctrl_info->iomem_base +
4117 PQI_DEVICE_REGISTERS_OFFSET +
4118 get_unaligned_le64(
4119 &response.data.create_operational_oq.oq_ci_offset);
4120
4121 return 0;
4122}
4123
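/*
 * Create one operational queue group: an inbound queue for the RAID path,
 * an inbound queue for the AIO path (flagged as such via the
 * change-IQ-property admin function), and a single outbound queue shared
 * by both for responses.  The producer/consumer doorbell offsets returned
 * by the controller are translated into mapped register addresses.
 */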
Kevin Barnett061ef062017-05-03 18:53:05 -05004124static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
4125 unsigned int group_number)
Kevin Barnett6c223762016-06-27 16:41:00 -05004126{
Kevin Barnett6c223762016-06-27 16:41:00 -05004127 int rc;
4128 struct pqi_queue_group *queue_group;
4129 struct pqi_general_admin_request request;
4130 struct pqi_general_admin_response response;
4131
Kevin Barnett061ef062017-05-03 18:53:05 -05004132 queue_group = &ctrl_info->queue_groups[group_number];
Kevin Barnett6c223762016-06-27 16:41:00 -05004133
4134 /*
4135 * Create IQ (Inbound Queue - host to device queue) for
4136 * RAID path.
4137 */
4138 memset(&request, 0, sizeof(request));
4139 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4140 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4141 &request.header.iu_length);
4142 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4143 put_unaligned_le16(queue_group->iq_id[RAID_PATH],
4144 &request.data.create_operational_iq.queue_id);
4145 put_unaligned_le64(
4146 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
4147 &request.data.create_operational_iq.element_array_addr);
4148 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
4149 &request.data.create_operational_iq.ci_addr);
4150 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4151 &request.data.create_operational_iq.num_elements);
4152 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4153 &request.data.create_operational_iq.element_length);
4154 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4155
4156 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4157 &response);
4158 if (rc) {
4159 dev_err(&ctrl_info->pci_dev->dev,
4160 "error creating inbound RAID queue\n");
4161 return rc;
4162 }
4163
4164 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
4165 PQI_DEVICE_REGISTERS_OFFSET +
4166 get_unaligned_le64(
4167 &response.data.create_operational_iq.iq_pi_offset);
4168
4169 /*
4170 * Create IQ (Inbound Queue - host to device queue) for
4171 * Advanced I/O (AIO) path.
4172 */
4173 memset(&request, 0, sizeof(request));
4174 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4175 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4176 &request.header.iu_length);
4177 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4178 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4179 &request.data.create_operational_iq.queue_id);
4180 put_unaligned_le64((u64)queue_group->
4181 iq_element_array_bus_addr[AIO_PATH],
4182 &request.data.create_operational_iq.element_array_addr);
4183 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
4184 &request.data.create_operational_iq.ci_addr);
4185 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4186 &request.data.create_operational_iq.num_elements);
4187 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4188 &request.data.create_operational_iq.element_length);
4189 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4190
4191 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4192 &response);
4193 if (rc) {
4194 dev_err(&ctrl_info->pci_dev->dev,
4195 "error creating inbound AIO queue\n");
Kevin Barnett339faa82018-03-21 13:32:31 -05004196 return rc;
Kevin Barnett6c223762016-06-27 16:41:00 -05004197 }
4198
4199 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
4200 PQI_DEVICE_REGISTERS_OFFSET +
4201 get_unaligned_le64(
4202 &response.data.create_operational_iq.iq_pi_offset);
4203
4204 /*
4205 * Designate the 2nd IQ as the AIO path. By default, all IQs are
4206 * assumed to be for RAID path I/O unless we change the queue's
4207 * property.
4208 */
4209 memset(&request, 0, sizeof(request));
4210 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4211 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4212 &request.header.iu_length);
4213 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
4214 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4215 &request.data.change_operational_iq_properties.queue_id);
4216 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
4217 &request.data.change_operational_iq_properties.vendor_specific);
4218
4219 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4220 &response);
4221 if (rc) {
4222 dev_err(&ctrl_info->pci_dev->dev,
4223 "error changing queue property\n");
Kevin Barnett339faa82018-03-21 13:32:31 -05004224 return rc;
Kevin Barnett6c223762016-06-27 16:41:00 -05004225 }
4226
4227 /*
4228 * Create OQ (Outbound Queue - device to host queue).
4229 */
4230 memset(&request, 0, sizeof(request));
4231 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4232 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4233 &request.header.iu_length);
4234 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4235 put_unaligned_le16(queue_group->oq_id,
4236 &request.data.create_operational_oq.queue_id);
4237 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
4238 &request.data.create_operational_oq.element_array_addr);
4239 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
4240 &request.data.create_operational_oq.pi_addr);
4241 put_unaligned_le16(ctrl_info->num_elements_per_oq,
4242 &request.data.create_operational_oq.num_elements);
4243 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
4244 &request.data.create_operational_oq.element_length);
4245 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4246 put_unaligned_le16(queue_group->int_msg_num,
4247 &request.data.create_operational_oq.int_msg_num);
4248
4249 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4250 &response);
4251 if (rc) {
4252 dev_err(&ctrl_info->pci_dev->dev,
4253 "error creating outbound queue\n");
Kevin Barnett339faa82018-03-21 13:32:31 -05004254 return rc;
Kevin Barnett6c223762016-06-27 16:41:00 -05004255 }
4256
4257 queue_group->oq_ci = ctrl_info->iomem_base +
4258 PQI_DEVICE_REGISTERS_OFFSET +
4259 get_unaligned_le64(
4260 &response.data.create_operational_oq.oq_ci_offset);
4261
Kevin Barnett6c223762016-06-27 16:41:00 -05004262 return 0;
Kevin Barnett6c223762016-06-27 16:41:00 -05004263}
4264
4265static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
4266{
4267 int rc;
4268 unsigned int i;
4269
4270 rc = pqi_create_event_queue(ctrl_info);
4271 if (rc) {
4272 dev_err(&ctrl_info->pci_dev->dev,
4273 "error creating event queue\n");
4274 return rc;
4275 }
4276
4277 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
Kevin Barnett061ef062017-05-03 18:53:05 -05004278 rc = pqi_create_queue_group(ctrl_info, i);
Kevin Barnett6c223762016-06-27 16:41:00 -05004279 if (rc) {
4280 dev_err(&ctrl_info->pci_dev->dev,
4281 "error creating queue group number %u/%u\n",
4282 i, ctrl_info->num_queue_groups);
4283 return rc;
4284 }
4285 }
4286
4287 return 0;
4288}
4289
4290#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
4291 (offsetof(struct pqi_event_config, descriptors) + \
4292 (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
4293
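/*
 * Read the controller's event configuration, point every supported event
 * type at the driver's event queue (or at OQ ID 0 to disable event
 * delivery), and write the configuration back.
 */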
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05004294static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
4295 bool enable_events)
Kevin Barnett6c223762016-06-27 16:41:00 -05004296{
4297 int rc;
4298 unsigned int i;
4299 struct pqi_event_config *event_config;
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05004300 struct pqi_event_descriptor *event_descriptor;
Kevin Barnett6c223762016-06-27 16:41:00 -05004301 struct pqi_general_management_request request;
4302
4303 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4304 GFP_KERNEL);
4305 if (!event_config)
4306 return -ENOMEM;
4307
4308 memset(&request, 0, sizeof(request));
4309
4310 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
4311 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4312 data.report_event_configuration.sg_descriptors[1]) -
4313 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4314 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4315 &request.data.report_event_configuration.buffer_length);
4316
4317 rc = pqi_map_single(ctrl_info->pci_dev,
4318 request.data.report_event_configuration.sg_descriptors,
4319 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
Christoph Hellwig6917a9c2018-10-11 09:47:59 +02004320 DMA_FROM_DEVICE);
Kevin Barnett6c223762016-06-27 16:41:00 -05004321 if (rc)
4322 goto out;
4323
4324 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
4325 0, NULL, NO_TIMEOUT);
4326
4327 pqi_pci_unmap(ctrl_info->pci_dev,
4328 request.data.report_event_configuration.sg_descriptors, 1,
Christoph Hellwig6917a9c2018-10-11 09:47:59 +02004329 DMA_FROM_DEVICE);
Kevin Barnett6c223762016-06-27 16:41:00 -05004330
4331 if (rc)
4332 goto out;
4333
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05004334 for (i = 0; i < event_config->num_event_descriptors; i++) {
4335 event_descriptor = &event_config->descriptors[i];
4336 if (enable_events &&
4337 pqi_is_supported_event(event_descriptor->event_type))
4338 put_unaligned_le16(ctrl_info->event_queue.oq_id,
4339 &event_descriptor->oq_id);
4340 else
4341 put_unaligned_le16(0, &event_descriptor->oq_id);
4342 }
Kevin Barnett6c223762016-06-27 16:41:00 -05004343
4344 memset(&request, 0, sizeof(request));
4345
4346 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
4347 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4348 data.report_event_configuration.sg_descriptors[1]) -
4349 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4350 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4351 &request.data.report_event_configuration.buffer_length);
4352
4353 rc = pqi_map_single(ctrl_info->pci_dev,
4354 request.data.report_event_configuration.sg_descriptors,
4355 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
Christoph Hellwig6917a9c2018-10-11 09:47:59 +02004356 DMA_TO_DEVICE);
Kevin Barnett6c223762016-06-27 16:41:00 -05004357 if (rc)
4358 goto out;
4359
4360 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
4361 NULL, NO_TIMEOUT);
4362
4363 pqi_pci_unmap(ctrl_info->pci_dev,
4364 request.data.report_event_configuration.sg_descriptors, 1,
Christoph Hellwig6917a9c2018-10-11 09:47:59 +02004365 DMA_TO_DEVICE);
Kevin Barnett6c223762016-06-27 16:41:00 -05004366
4367out:
4368 kfree(event_config);
4369
4370 return rc;
4371}
4372
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05004373static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
4374{
4375 return pqi_configure_events(ctrl_info, true);
4376}
4377
4378static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info)
4379{
4380 return pqi_configure_events(ctrl_info, false);
4381}
4382
Kevin Barnett6c223762016-06-27 16:41:00 -05004383static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
4384{
4385 unsigned int i;
4386 struct device *dev;
4387 size_t sg_chain_buffer_length;
4388 struct pqi_io_request *io_request;
4389
4390 if (!ctrl_info->io_request_pool)
4391 return;
4392
4393 dev = &ctrl_info->pci_dev->dev;
4394 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4395 io_request = ctrl_info->io_request_pool;
4396
4397 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4398 kfree(io_request->iu);
4399 if (!io_request->sg_chain_buffer)
4400 break;
4401 dma_free_coherent(dev, sg_chain_buffer_length,
4402 io_request->sg_chain_buffer,
4403 io_request->sg_chain_buffer_dma_handle);
4404 io_request++;
4405 }
4406
4407 kfree(ctrl_info->io_request_pool);
4408 ctrl_info->io_request_pool = NULL;
4409}
4410
4411static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
4412{
4413 ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
4414 ctrl_info->error_buffer_length,
4415 &ctrl_info->error_buffer_dma_handle, GFP_KERNEL);
4416
4417 if (!ctrl_info->error_buffer)
4418 return -ENOMEM;
4419
4420 return 0;
4421}
4422
4423static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
4424{
4425 unsigned int i;
4426 void *sg_chain_buffer;
4427 size_t sg_chain_buffer_length;
4428 dma_addr_t sg_chain_buffer_dma_handle;
4429 struct device *dev;
4430 struct pqi_io_request *io_request;
4431
Kees Cook6396bb22018-06-12 14:03:40 -07004432 ctrl_info->io_request_pool =
4433 kcalloc(ctrl_info->max_io_slots,
4434 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
Kevin Barnett6c223762016-06-27 16:41:00 -05004435
4436 if (!ctrl_info->io_request_pool) {
4437 dev_err(&ctrl_info->pci_dev->dev,
4438 "failed to allocate I/O request pool\n");
4439 goto error;
4440 }
4441
4442 dev = &ctrl_info->pci_dev->dev;
4443 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4444 io_request = ctrl_info->io_request_pool;
4445
4446 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4447 io_request->iu =
4448 kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
4449
4450 if (!io_request->iu) {
4451 dev_err(&ctrl_info->pci_dev->dev,
4452 "failed to allocate IU buffers\n");
4453 goto error;
4454 }
4455
4456 sg_chain_buffer = dma_alloc_coherent(dev,
4457 sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
4458 GFP_KERNEL);
4459
4460 if (!sg_chain_buffer) {
4461 dev_err(&ctrl_info->pci_dev->dev,
4462 "failed to allocate PQI scatter-gather chain buffers\n");
4463 goto error;
4464 }
4465
4466 io_request->index = i;
4467 io_request->sg_chain_buffer = sg_chain_buffer;
4468 io_request->sg_chain_buffer_dma_handle =
4469 sg_chain_buffer_dma_handle;
4470 io_request++;
4471 }
4472
4473 return 0;
4474
4475error:
4476 pqi_free_all_io_requests(ctrl_info);
4477
4478 return -ENOMEM;
4479}
4480
4481/*
4482 * Calculate required resources that are sized based on max. outstanding
4483 * requests and max. transfer size.
4484 */
4485
4486static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
4487{
4488 u32 max_transfer_size;
4489 u32 max_sg_entries;
4490
4491 ctrl_info->scsi_ml_can_queue =
4492 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
4493 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
4494
4495 ctrl_info->error_buffer_length =
4496 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
4497
Kevin Barnettd727a772017-05-03 18:54:25 -05004498 if (reset_devices)
4499 max_transfer_size = min(ctrl_info->max_transfer_size,
4500 PQI_MAX_TRANSFER_SIZE_KDUMP);
4501 else
4502 max_transfer_size = min(ctrl_info->max_transfer_size,
4503 PQI_MAX_TRANSFER_SIZE);
Kevin Barnett6c223762016-06-27 16:41:00 -05004504
4505 max_sg_entries = max_transfer_size / PAGE_SIZE;
4506
4507 /* +1 to cover when the buffer is not page-aligned. */
4508 max_sg_entries++;
4509
4510 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
4511
4512 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
4513
4514 ctrl_info->sg_chain_buffer_length =
Kevin Barnette1d213b2017-05-03 18:53:18 -05004515 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
4516 PQI_EXTRA_SGL_MEMORY;
Kevin Barnett6c223762016-06-27 16:41:00 -05004517 ctrl_info->sg_tablesize = max_sg_entries;
4518 ctrl_info->max_sectors = max_transfer_size / 512;
4519}
4520
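/*
 * Size the operational queues.  The number of queue groups is bounded by
 * the number of online CPUs, the number of MSI-X vectors, and the
 * controller's inbound/outbound queue limits (kdump is restricted to a
 * single group).  Each outbound queue serves the responses of two inbound
 * queues (RAID + AIO), which is why num_elements_per_oq is derived as
 * ((num_elements_per_iq - 1) * 2) + 1.
 */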
4521static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
4522{
Kevin Barnett6c223762016-06-27 16:41:00 -05004523 int num_queue_groups;
4524 u16 num_elements_per_iq;
4525 u16 num_elements_per_oq;
4526
Kevin Barnettd727a772017-05-03 18:54:25 -05004527 if (reset_devices) {
4528 num_queue_groups = 1;
4529 } else {
4530 int num_cpus;
4531 int max_queue_groups;
Kevin Barnett6c223762016-06-27 16:41:00 -05004532
Kevin Barnettd727a772017-05-03 18:54:25 -05004533 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
4534 ctrl_info->max_outbound_queues - 1);
4535 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
4536
4537 num_cpus = num_online_cpus();
4538 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
4539 num_queue_groups = min(num_queue_groups, max_queue_groups);
4540 }
Kevin Barnett6c223762016-06-27 16:41:00 -05004541
4542 ctrl_info->num_queue_groups = num_queue_groups;
Kevin Barnett061ef062017-05-03 18:53:05 -05004543 ctrl_info->max_hw_queue_index = num_queue_groups - 1;
Kevin Barnett6c223762016-06-27 16:41:00 -05004544
Kevin Barnett77668f42016-08-31 14:54:23 -05004545 /*
4546 * Make sure that the max. inbound IU length is an even multiple
4547 * of our inbound element length.
4548 */
4549 ctrl_info->max_inbound_iu_length =
4550 (ctrl_info->max_inbound_iu_length_per_firmware /
4551 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
4552 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
Kevin Barnett6c223762016-06-27 16:41:00 -05004553
4554 num_elements_per_iq =
4555 (ctrl_info->max_inbound_iu_length /
4556 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4557
4558 /* Add one because one element in each queue is unusable. */
4559 num_elements_per_iq++;
4560
4561 num_elements_per_iq = min(num_elements_per_iq,
4562 ctrl_info->max_elements_per_iq);
4563
4564 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
4565 num_elements_per_oq = min(num_elements_per_oq,
4566 ctrl_info->max_elements_per_oq);
4567
4568 ctrl_info->num_elements_per_iq = num_elements_per_iq;
4569 ctrl_info->num_elements_per_oq = num_elements_per_oq;
4570
4571 ctrl_info->max_sg_per_iu =
4572 ((ctrl_info->max_inbound_iu_length -
4573 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
4574 sizeof(struct pqi_sg_descriptor)) +
4575 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
4576}
4577
4578static inline void pqi_set_sg_descriptor(
4579 struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
4580{
4581 u64 address = (u64)sg_dma_address(sg);
4582 unsigned int length = sg_dma_len(sg);
4583
4584 put_unaligned_le64(address, &sg_descriptor->address);
4585 put_unaligned_le32(length, &sg_descriptor->length);
4586 put_unaligned_le32(0, &sg_descriptor->flags);
4587}
4588
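/*
 * Translate the scatter-gather list of a SCSI command into PQI SG
 * descriptors for a RAID path request.  Descriptors that fit in the IU
 * are embedded directly; if the list is longer, the last embedded
 * descriptor becomes a CISS_SG_CHAIN pointer to the request's
 * pre-allocated chain buffer and the remaining entries are written there.
 * The final descriptor is flagged CISS_SG_LAST.
 */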
4589static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
4590 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
4591 struct pqi_io_request *io_request)
4592{
4593 int i;
4594 u16 iu_length;
4595 int sg_count;
4596 bool chained;
4597 unsigned int num_sg_in_iu;
4598 unsigned int max_sg_per_iu;
4599 struct scatterlist *sg;
4600 struct pqi_sg_descriptor *sg_descriptor;
4601
4602 sg_count = scsi_dma_map(scmd);
4603 if (sg_count < 0)
4604 return sg_count;
4605
4606 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
4607 PQI_REQUEST_HEADER_LENGTH;
4608
4609 if (sg_count == 0)
4610 goto out;
4611
4612 sg = scsi_sglist(scmd);
4613 sg_descriptor = request->sg_descriptors;
4614 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4615 chained = false;
4616 num_sg_in_iu = 0;
4617 i = 0;
4618
4619 while (1) {
4620 pqi_set_sg_descriptor(sg_descriptor, sg);
4621 if (!chained)
4622 num_sg_in_iu++;
4623 i++;
4624 if (i == sg_count)
4625 break;
4626 sg_descriptor++;
4627 if (i == max_sg_per_iu) {
4628 put_unaligned_le64(
4629 (u64)io_request->sg_chain_buffer_dma_handle,
4630 &sg_descriptor->address);
4631 put_unaligned_le32((sg_count - num_sg_in_iu)
4632 * sizeof(*sg_descriptor),
4633 &sg_descriptor->length);
4634 put_unaligned_le32(CISS_SG_CHAIN,
4635 &sg_descriptor->flags);
4636 chained = true;
4637 num_sg_in_iu++;
4638 sg_descriptor = io_request->sg_chain_buffer;
4639 }
4640 sg = sg_next(sg);
4641 }
4642
4643 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4644 request->partial = chained;
4645 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4646
4647out:
4648 put_unaligned_le16(iu_length, &request->header.iu_length);
4649
4650 return 0;
4651}
4652
4653static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
4654 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
4655 struct pqi_io_request *io_request)
4656{
4657 int i;
4658 u16 iu_length;
4659 int sg_count;
Kevin Barnetta60eec02016-08-31 14:54:11 -05004660 bool chained;
4661 unsigned int num_sg_in_iu;
4662 unsigned int max_sg_per_iu;
Kevin Barnett6c223762016-06-27 16:41:00 -05004663 struct scatterlist *sg;
4664 struct pqi_sg_descriptor *sg_descriptor;
4665
4666 sg_count = scsi_dma_map(scmd);
4667 if (sg_count < 0)
4668 return sg_count;
Kevin Barnetta60eec02016-08-31 14:54:11 -05004669
4670 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
4671 PQI_REQUEST_HEADER_LENGTH;
4672 num_sg_in_iu = 0;
4673
Kevin Barnett6c223762016-06-27 16:41:00 -05004674 if (sg_count == 0)
4675 goto out;
4676
Kevin Barnetta60eec02016-08-31 14:54:11 -05004677 sg = scsi_sglist(scmd);
4678 sg_descriptor = request->sg_descriptors;
4679 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4680 chained = false;
4681 i = 0;
Kevin Barnett6c223762016-06-27 16:41:00 -05004682
Kevin Barnetta60eec02016-08-31 14:54:11 -05004683 while (1) {
4684 pqi_set_sg_descriptor(sg_descriptor, sg);
4685 if (!chained)
4686 num_sg_in_iu++;
4687 i++;
4688 if (i == sg_count)
4689 break;
4690 sg_descriptor++;
4691 if (i == max_sg_per_iu) {
4692 put_unaligned_le64(
4693 (u64)io_request->sg_chain_buffer_dma_handle,
4694 &sg_descriptor->address);
4695 put_unaligned_le32((sg_count - num_sg_in_iu)
4696 * sizeof(*sg_descriptor),
4697 &sg_descriptor->length);
4698 put_unaligned_le32(CISS_SG_CHAIN,
4699 &sg_descriptor->flags);
4700 chained = true;
4701 num_sg_in_iu++;
4702 sg_descriptor = io_request->sg_chain_buffer;
Kevin Barnett6c223762016-06-27 16:41:00 -05004703 }
Kevin Barnetta60eec02016-08-31 14:54:11 -05004704 sg = sg_next(sg);
Kevin Barnett6c223762016-06-27 16:41:00 -05004705 }
4706
Kevin Barnetta60eec02016-08-31 14:54:11 -05004707 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4708 request->partial = chained;
Kevin Barnett6c223762016-06-27 16:41:00 -05004709 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
Kevin Barnetta60eec02016-08-31 14:54:11 -05004710
4711out:
Kevin Barnett6c223762016-06-27 16:41:00 -05004712 put_unaligned_le16(iu_length, &request->header.iu_length);
4713 request->num_sg_descriptors = num_sg_in_iu;
4714
4715 return 0;
4716}
4717
4718static void pqi_raid_io_complete(struct pqi_io_request *io_request,
4719 void *context)
4720{
4721 struct scsi_cmnd *scmd;
4722
4723 scmd = io_request->scmd;
4724 pqi_free_io_request(io_request);
4725 scsi_dma_unmap(scmd);
4726 pqi_scsi_done(scmd);
4727}
4728
Kevin Barnett376fb882017-05-03 18:54:43 -05004729static int pqi_raid_submit_scsi_cmd_with_io_request(
4730 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
Kevin Barnett6c223762016-06-27 16:41:00 -05004731 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4732 struct pqi_queue_group *queue_group)
4733{
4734 int rc;
4735 size_t cdb_length;
Kevin Barnett6c223762016-06-27 16:41:00 -05004736 struct pqi_raid_path_request *request;
4737
Kevin Barnett6c223762016-06-27 16:41:00 -05004738 io_request->io_complete_callback = pqi_raid_io_complete;
4739 io_request->scmd = scmd;
4740
Kevin Barnett6c223762016-06-27 16:41:00 -05004741 request = io_request->iu;
4742 memset(request, 0,
4743 offsetof(struct pqi_raid_path_request, sg_descriptors));
4744
4745 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
4746 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4747 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4748 put_unaligned_le16(io_request->index, &request->request_id);
4749 request->error_index = request->request_id;
4750 memcpy(request->lun_number, device->scsi3addr,
4751 sizeof(request->lun_number));
4752
4753 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
4754 memcpy(request->cdb, scmd->cmnd, cdb_length);
4755
4756 switch (cdb_length) {
4757 case 6:
4758 case 10:
4759 case 12:
4760 case 16:
4761 /* No bytes in the Additional CDB bytes field */
4762 request->additional_cdb_bytes_usage =
4763 SOP_ADDITIONAL_CDB_BYTES_0;
4764 break;
4765 case 20:
 4766		/* 4 bytes in the Additional CDB bytes field */
4767 request->additional_cdb_bytes_usage =
4768 SOP_ADDITIONAL_CDB_BYTES_4;
4769 break;
4770 case 24:
 4771		/* 8 bytes in the Additional CDB bytes field */
4772 request->additional_cdb_bytes_usage =
4773 SOP_ADDITIONAL_CDB_BYTES_8;
4774 break;
4775 case 28:
 4776		/* 12 bytes in the Additional CDB bytes field */
4777 request->additional_cdb_bytes_usage =
4778 SOP_ADDITIONAL_CDB_BYTES_12;
4779 break;
4780 case 32:
4781 default:
 4782		/* 16 bytes in the Additional CDB bytes field */
4783 request->additional_cdb_bytes_usage =
4784 SOP_ADDITIONAL_CDB_BYTES_16;
4785 break;
4786 }
4787
4788 switch (scmd->sc_data_direction) {
4789 case DMA_TO_DEVICE:
4790 request->data_direction = SOP_READ_FLAG;
4791 break;
4792 case DMA_FROM_DEVICE:
4793 request->data_direction = SOP_WRITE_FLAG;
4794 break;
4795 case DMA_NONE:
4796 request->data_direction = SOP_NO_DIRECTION_FLAG;
4797 break;
4798 case DMA_BIDIRECTIONAL:
4799 request->data_direction = SOP_BIDIRECTIONAL;
4800 break;
4801 default:
4802 dev_err(&ctrl_info->pci_dev->dev,
4803 "unknown data direction: %d\n",
4804 scmd->sc_data_direction);
Kevin Barnett6c223762016-06-27 16:41:00 -05004805 break;
4806 }
4807
4808 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
4809 if (rc) {
4810 pqi_free_io_request(io_request);
4811 return SCSI_MLQUEUE_HOST_BUSY;
4812 }
4813
4814 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
4815
4816 return 0;
4817}
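
/*
 * Summary of the RAID path submission above: the request IU
 * (PQI_REQUEST_IU_RAID_PATH_IO) carries the LUN address, the task
 * attribute, the buffer length and up to 16 CDB bytes; longer CDBs are
 * flagged through additional_cdb_bytes_usage.  The SOP data-direction
 * flags are defined from the controller's point of view, which is why a
 * host write (DMA_TO_DEVICE) is posted as SOP_READ_FLAG and a host read
 * as SOP_WRITE_FLAG.  Once the SG list is attached the request is posted
 * to the RAID path of the selected queue group; an SG mapping failure is
 * reported back to the midlayer as SCSI_MLQUEUE_HOST_BUSY.
 */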
4818
Kevin Barnett376fb882017-05-03 18:54:43 -05004819static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4820 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4821 struct pqi_queue_group *queue_group)
4822{
4823 struct pqi_io_request *io_request;
4824
4825 io_request = pqi_alloc_io_request(ctrl_info);
4826
4827 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
4828 device, scmd, queue_group);
4829}
4830
4831static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info)
4832{
4833 if (!pqi_ctrl_blocked(ctrl_info))
4834 schedule_work(&ctrl_info->raid_bypass_retry_work);
4835}
4836
4837static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
4838{
4839 struct scsi_cmnd *scmd;
Kevin Barnett03b288cf2017-05-03 18:54:49 -05004840 struct pqi_scsi_dev *device;
Kevin Barnett376fb882017-05-03 18:54:43 -05004841 struct pqi_ctrl_info *ctrl_info;
4842
4843 if (!io_request->raid_bypass)
4844 return false;
4845
4846 scmd = io_request->scmd;
4847 if ((scmd->result & 0xff) == SAM_STAT_GOOD)
4848 return false;
4849 if (host_byte(scmd->result) == DID_NO_CONNECT)
4850 return false;
4851
Kevin Barnett03b288cf2017-05-03 18:54:49 -05004852 device = scmd->device->hostdata;
4853 if (pqi_device_offline(device))
4854 return false;
4855
Kevin Barnett376fb882017-05-03 18:54:43 -05004856 ctrl_info = shost_to_hba(scmd->device->host);
4857 if (pqi_ctrl_offline(ctrl_info))
4858 return false;
4859
4860 return true;
4861}
4862
4863static inline void pqi_add_to_raid_bypass_retry_list(
4864 struct pqi_ctrl_info *ctrl_info,
4865 struct pqi_io_request *io_request, bool at_head)
4866{
4867 unsigned long flags;
4868
4869 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
4870 if (at_head)
4871 list_add(&io_request->request_list_entry,
4872 &ctrl_info->raid_bypass_retry_list);
4873 else
4874 list_add_tail(&io_request->request_list_entry,
4875 &ctrl_info->raid_bypass_retry_list);
4876 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
4877}
4878
4879static void pqi_queued_raid_bypass_complete(struct pqi_io_request *io_request,
4880 void *context)
4881{
4882 struct scsi_cmnd *scmd;
4883
4884 scmd = io_request->scmd;
4885 pqi_free_io_request(io_request);
4886 pqi_scsi_done(scmd);
4887}
4888
4889static void pqi_queue_raid_bypass_retry(struct pqi_io_request *io_request)
4890{
4891 struct scsi_cmnd *scmd;
4892 struct pqi_ctrl_info *ctrl_info;
4893
4894 io_request->io_complete_callback = pqi_queued_raid_bypass_complete;
4895 scmd = io_request->scmd;
4896 scmd->result = 0;
4897 ctrl_info = shost_to_hba(scmd->device->host);
4898
4899 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, false);
4900 pqi_schedule_bypass_retry(ctrl_info);
4901}
4902
4903static int pqi_retry_raid_bypass(struct pqi_io_request *io_request)
4904{
4905 struct scsi_cmnd *scmd;
4906 struct pqi_scsi_dev *device;
4907 struct pqi_ctrl_info *ctrl_info;
4908 struct pqi_queue_group *queue_group;
4909
4910 scmd = io_request->scmd;
4911 device = scmd->device->hostdata;
4912 if (pqi_device_in_reset(device)) {
4913 pqi_free_io_request(io_request);
4914 set_host_byte(scmd, DID_RESET);
4915 pqi_scsi_done(scmd);
4916 return 0;
4917 }
4918
4919 ctrl_info = shost_to_hba(scmd->device->host);
4920 queue_group = io_request->queue_group;
4921
4922 pqi_reinit_io_request(io_request);
4923
4924 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
4925 device, scmd, queue_group);
4926}
4927
4928static inline struct pqi_io_request *pqi_next_queued_raid_bypass_request(
4929 struct pqi_ctrl_info *ctrl_info)
4930{
4931 unsigned long flags;
4932 struct pqi_io_request *io_request;
4933
4934 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
4935 io_request = list_first_entry_or_null(
4936 &ctrl_info->raid_bypass_retry_list,
4937 struct pqi_io_request, request_list_entry);
4938 if (io_request)
4939 list_del(&io_request->request_list_entry);
4940 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
4941
4942 return io_request;
4943}
4944
4945static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info)
4946{
4947 int rc;
4948 struct pqi_io_request *io_request;
4949
4950 pqi_ctrl_busy(ctrl_info);
4951
4952 while (1) {
4953 if (pqi_ctrl_blocked(ctrl_info))
4954 break;
4955 io_request = pqi_next_queued_raid_bypass_request(ctrl_info);
4956 if (!io_request)
4957 break;
4958 rc = pqi_retry_raid_bypass(io_request);
4959 if (rc) {
4960 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request,
4961 true);
4962 pqi_schedule_bypass_retry(ctrl_info);
4963 break;
4964 }
4965 }
4966
4967 pqi_ctrl_unbusy(ctrl_info);
4968}
4969
4970static void pqi_raid_bypass_retry_worker(struct work_struct *work)
4971{
4972 struct pqi_ctrl_info *ctrl_info;
4973
4974 ctrl_info = container_of(work, struct pqi_ctrl_info,
4975 raid_bypass_retry_work);
4976 pqi_retry_raid_bypass_requests(ctrl_info);
4977}
4978
Kevin Barnett5f310422017-05-03 18:54:55 -05004979static void pqi_clear_all_queued_raid_bypass_retries(
4980 struct pqi_ctrl_info *ctrl_info)
Kevin Barnett376fb882017-05-03 18:54:43 -05004981{
4982 unsigned long flags;
Kevin Barnett376fb882017-05-03 18:54:43 -05004983
4984 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
Kevin Barnett5f310422017-05-03 18:54:55 -05004985 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
Kevin Barnett376fb882017-05-03 18:54:43 -05004986 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
4987}
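
/*
 * RAID bypass retry machinery (functions above): when an accelerated
 * (bypass) request fails in a way that looks retryable - see
 * pqi_raid_bypass_retry_needed() - it is placed on
 * ctrl_info->raid_bypass_retry_list and raid_bypass_retry_work is
 * scheduled.  The worker re-issues each queued command down the normal
 * RAID path via pqi_retry_raid_bypass(); if a resubmission fails, the
 * request goes back to the head of the list and the worker is
 * rescheduled.  Retries are skipped while the controller is blocked,
 * and pqi_clear_all_queued_raid_bypass_retries() simply reinitializes
 * the list when any queued retries have to be thrown away.
 */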
4988
Kevin Barnett6c223762016-06-27 16:41:00 -05004989static void pqi_aio_io_complete(struct pqi_io_request *io_request,
4990 void *context)
4991{
4992 struct scsi_cmnd *scmd;
4993
4994 scmd = io_request->scmd;
4995 scsi_dma_unmap(scmd);
4996 if (io_request->status == -EAGAIN)
4997 set_host_byte(scmd, DID_IMM_RETRY);
Kevin Barnett376fb882017-05-03 18:54:43 -05004998 else if (pqi_raid_bypass_retry_needed(io_request)) {
4999 pqi_queue_raid_bypass_retry(io_request);
5000 return;
5001 }
Kevin Barnett6c223762016-06-27 16:41:00 -05005002 pqi_free_io_request(io_request);
5003 pqi_scsi_done(scmd);
5004}
5005
5006static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5007 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5008 struct pqi_queue_group *queue_group)
5009{
5010 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
Kevin Barnett376fb882017-05-03 18:54:43 -05005011 scmd->cmnd, scmd->cmd_len, queue_group, NULL, false);
Kevin Barnett6c223762016-06-27 16:41:00 -05005012}
5013
5014static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
5015 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
5016 unsigned int cdb_length, struct pqi_queue_group *queue_group,
Kevin Barnett376fb882017-05-03 18:54:43 -05005017 struct pqi_encryption_info *encryption_info, bool raid_bypass)
Kevin Barnett6c223762016-06-27 16:41:00 -05005018{
5019 int rc;
5020 struct pqi_io_request *io_request;
5021 struct pqi_aio_path_request *request;
5022
5023 io_request = pqi_alloc_io_request(ctrl_info);
5024 io_request->io_complete_callback = pqi_aio_io_complete;
5025 io_request->scmd = scmd;
Kevin Barnett376fb882017-05-03 18:54:43 -05005026 io_request->raid_bypass = raid_bypass;
Kevin Barnett6c223762016-06-27 16:41:00 -05005027
5028 request = io_request->iu;
5029 memset(request, 0,
5030 offsetof(struct pqi_raid_path_request, sg_descriptors));
5031
5032 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
5033 put_unaligned_le32(aio_handle, &request->nexus_id);
5034 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5035 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5036 put_unaligned_le16(io_request->index, &request->request_id);
5037 request->error_index = request->request_id;
5038 if (cdb_length > sizeof(request->cdb))
5039 cdb_length = sizeof(request->cdb);
5040 request->cdb_length = cdb_length;
5041 memcpy(request->cdb, cdb, cdb_length);
5042
5043 switch (scmd->sc_data_direction) {
5044 case DMA_TO_DEVICE:
5045 request->data_direction = SOP_READ_FLAG;
5046 break;
5047 case DMA_FROM_DEVICE:
5048 request->data_direction = SOP_WRITE_FLAG;
5049 break;
5050 case DMA_NONE:
5051 request->data_direction = SOP_NO_DIRECTION_FLAG;
5052 break;
5053 case DMA_BIDIRECTIONAL:
5054 request->data_direction = SOP_BIDIRECTIONAL;
5055 break;
5056 default:
5057 dev_err(&ctrl_info->pci_dev->dev,
5058 "unknown data direction: %d\n",
5059 scmd->sc_data_direction);
Kevin Barnett6c223762016-06-27 16:41:00 -05005060 break;
5061 }
5062
5063 if (encryption_info) {
5064 request->encryption_enable = true;
5065 put_unaligned_le16(encryption_info->data_encryption_key_index,
5066 &request->data_encryption_key_index);
5067 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5068 &request->encrypt_tweak_lower);
5069 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5070 &request->encrypt_tweak_upper);
5071 }
5072
5073 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
5074 if (rc) {
5075 pqi_free_io_request(io_request);
5076 return SCSI_MLQUEUE_HOST_BUSY;
5077 }
5078
5079 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5080
5081 return 0;
5082}
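
/*
 * Notes on pqi_aio_submit_io() above: the AIO (accelerated I/O) path
 * addresses the target by a pre-resolved aio_handle carried in nexus_id
 * rather than by SCSI LUN address.  The CDB is truncated to the size of
 * the IU's cdb field, and when encryption_info is supplied the
 * per-request data encryption key index and tweak values are copied into
 * the IU.  io_request->raid_bypass records whether this was a
 * logical-volume bypass request so a failure can later be retried down
 * the RAID path.
 */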
5083
Kevin Barnett061ef062017-05-03 18:53:05 -05005084static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
5085 struct scsi_cmnd *scmd)
5086{
5087 u16 hw_queue;
5088
5089 hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
5090 if (hw_queue > ctrl_info->max_hw_queue_index)
5091 hw_queue = 0;
5092
5093 return hw_queue;
5094}
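
/*
 * pqi_get_hw_queue() relies on the block layer's unique-tag encoding:
 * blk_mq_unique_tag() folds the hardware queue index into the upper bits
 * of the tag and blk_mq_unique_tag_to_hwq() extracts it again.  The
 * result selects the queue group used for submission; an out-of-range
 * value (which should not normally happen) falls back to queue group 0.
 */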
5095
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005096/*
5097 * This function gets called just before we hand the completed SCSI request
5098 * back to the SML.
5099 */
5100
5101void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
5102{
5103 struct pqi_scsi_dev *device;
5104
Mahesh Rajashekhara1e467312018-12-07 16:29:24 -06005105 if (!scmd->device) {
5106 set_host_byte(scmd, DID_NO_CONNECT);
5107 return;
5108 }
5109
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005110 device = scmd->device->hostdata;
Mahesh Rajashekhara1e467312018-12-07 16:29:24 -06005111 if (!device) {
5112 set_host_byte(scmd, DID_NO_CONNECT);
5113 return;
5114 }
5115
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005116 atomic_dec(&device->scsi_cmds_outstanding);
5117}
5118
Kevin Barnett6c223762016-06-27 16:41:00 -05005119static int pqi_scsi_queue_command(struct Scsi_Host *shost,
Kevin Barnett7d81d2b2016-08-31 14:55:11 -05005120 struct scsi_cmnd *scmd)
Kevin Barnett6c223762016-06-27 16:41:00 -05005121{
5122 int rc;
5123 struct pqi_ctrl_info *ctrl_info;
5124 struct pqi_scsi_dev *device;
Kevin Barnett061ef062017-05-03 18:53:05 -05005125 u16 hw_queue;
Kevin Barnett6c223762016-06-27 16:41:00 -05005126 struct pqi_queue_group *queue_group;
5127 bool raid_bypassed;
5128
5129 device = scmd->device->hostdata;
Kevin Barnett6c223762016-06-27 16:41:00 -05005130 ctrl_info = shost_to_hba(shost);
5131
Mahesh Rajashekhara1e467312018-12-07 16:29:24 -06005132 if (!device) {
5133 set_host_byte(scmd, DID_NO_CONNECT);
5134 pqi_scsi_done(scmd);
5135 return 0;
5136 }
5137
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005138 atomic_inc(&device->scsi_cmds_outstanding);
5139
Mahesh Rajashekhara1e467312018-12-07 16:29:24 -06005140 if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(ctrl_info,
5141 device)) {
Kevin Barnett6c223762016-06-27 16:41:00 -05005142 set_host_byte(scmd, DID_NO_CONNECT);
5143 pqi_scsi_done(scmd);
5144 return 0;
5145 }
5146
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005147 pqi_ctrl_busy(ctrl_info);
5148 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device)) {
5149 rc = SCSI_MLQUEUE_HOST_BUSY;
5150 goto out;
5151 }
5152
Kevin Barnett7d81d2b2016-08-31 14:55:11 -05005153 /*
5154 * This is necessary because the SML doesn't zero out this field during
5155 * error recovery.
5156 */
5157 scmd->result = 0;
5158
Kevin Barnett061ef062017-05-03 18:53:05 -05005159 hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
5160 queue_group = &ctrl_info->queue_groups[hw_queue];
Kevin Barnett6c223762016-06-27 16:41:00 -05005161
5162 if (pqi_is_logical_device(device)) {
5163 raid_bypassed = false;
Kevin Barnett588a63fe2017-05-03 18:55:25 -05005164 if (device->raid_bypass_enabled &&
Christoph Hellwig57292b52017-01-31 16:57:29 +01005165 !blk_rq_is_passthrough(scmd->request)) {
Kevin Barnett6c223762016-06-27 16:41:00 -05005166 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
5167 scmd, queue_group);
Kevin Barnett376fb882017-05-03 18:54:43 -05005168 if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY)
5169 raid_bypassed = true;
Kevin Barnett6c223762016-06-27 16:41:00 -05005170 }
5171 if (!raid_bypassed)
5172 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
5173 queue_group);
5174 } else {
5175 if (device->aio_enabled)
5176 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
5177 queue_group);
5178 else
5179 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
5180 queue_group);
5181 }
5182
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005183out:
5184 pqi_ctrl_unbusy(ctrl_info);
5185 if (rc)
5186 atomic_dec(&device->scsi_cmds_outstanding);
5187
Kevin Barnett6c223762016-06-27 16:41:00 -05005188 return rc;
5189}
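
/*
 * queuecommand flow (pqi_scsi_queue_command above): the per-device
 * outstanding-command counter is bumped on entry and dropped either when
 * the command completes (pqi_prep_for_scsi_done) or at the out: label if
 * submission fails.  Commands are failed immediately with DID_NO_CONNECT
 * when the device has no driver data, the controller is offline, or the
 * device is being removed; SCSI_MLQUEUE_HOST_BUSY is returned while the
 * controller is blocked or the device is in reset.  Otherwise the blk-mq
 * hardware queue picks the queue group; logical devices try the RAID
 * bypass path for non-passthrough requests when bypass is enabled
 * (a return of 0 or SCSI_MLQUEUE_HOST_BUSY is treated as handled,
 * anything else falls back to the normal RAID path), and physical
 * devices use the AIO path when aio_enabled is set.  scmd->result is
 * cleared before submission because the midlayer does not zero it during
 * error recovery.
 */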
5190
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005191static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info,
5192 struct pqi_queue_group *queue_group)
5193{
5194 unsigned int path;
5195 unsigned long flags;
5196 bool list_is_empty;
5197
5198 for (path = 0; path < 2; path++) {
5199 while (1) {
5200 spin_lock_irqsave(
5201 &queue_group->submit_lock[path], flags);
5202 list_is_empty =
5203 list_empty(&queue_group->request_list[path]);
5204 spin_unlock_irqrestore(
5205 &queue_group->submit_lock[path], flags);
5206 if (list_is_empty)
5207 break;
5208 pqi_check_ctrl_health(ctrl_info);
5209 if (pqi_ctrl_offline(ctrl_info))
5210 return -ENXIO;
5211 usleep_range(1000, 2000);
5212 }
5213 }
5214
5215 return 0;
5216}
5217
5218static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
5219{
5220 int rc;
5221 unsigned int i;
5222 unsigned int path;
5223 struct pqi_queue_group *queue_group;
5224 pqi_index_t iq_pi;
5225 pqi_index_t iq_ci;
5226
5227 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5228 queue_group = &ctrl_info->queue_groups[i];
5229
5230 rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group);
5231 if (rc)
5232 return rc;
5233
5234 for (path = 0; path < 2; path++) {
5235 iq_pi = queue_group->iq_pi_copy[path];
5236
5237 while (1) {
Kevin Barnettdac12fb2018-06-18 13:23:00 -05005238 iq_ci = readl(queue_group->iq_ci[path]);
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005239 if (iq_ci == iq_pi)
5240 break;
5241 pqi_check_ctrl_health(ctrl_info);
5242 if (pqi_ctrl_offline(ctrl_info))
5243 return -ENXIO;
5244 usleep_range(1000, 2000);
5245 }
5246 }
5247 }
5248
5249 return 0;
5250}
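
/*
 * Queue drain helpers above: pqi_wait_until_queued_io_drained() waits
 * for the driver-side request_list of each path (RAID and AIO) in a
 * queue group to empty, and pqi_wait_until_inbound_queues_empty() then
 * waits for the firmware to consume everything already posted, i.e. for
 * each inbound queue's consumer index (iq_ci) to catch up with the
 * driver's producer index copy (iq_pi).  Both loops poll every 1-2 ms
 * and give up with -ENXIO if the controller goes offline, so a dead
 * controller cannot wedge a quiesce forever.
 */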
5251
5252static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
5253 struct pqi_scsi_dev *device)
5254{
5255 unsigned int i;
5256 unsigned int path;
5257 struct pqi_queue_group *queue_group;
5258 unsigned long flags;
5259 struct pqi_io_request *io_request;
5260 struct pqi_io_request *next;
5261 struct scsi_cmnd *scmd;
5262 struct pqi_scsi_dev *scsi_device;
5263
5264 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5265 queue_group = &ctrl_info->queue_groups[i];
5266
5267 for (path = 0; path < 2; path++) {
5268 spin_lock_irqsave(
5269 &queue_group->submit_lock[path], flags);
5270
5271 list_for_each_entry_safe(io_request, next,
5272 &queue_group->request_list[path],
5273 request_list_entry) {
5274 scmd = io_request->scmd;
5275 if (!scmd)
5276 continue;
5277
5278 scsi_device = scmd->device->hostdata;
5279 if (scsi_device != device)
5280 continue;
5281
5282 list_del(&io_request->request_list_entry);
5283 set_host_byte(scmd, DID_RESET);
5284 pqi_scsi_done(scmd);
5285 }
5286
5287 spin_unlock_irqrestore(
5288 &queue_group->submit_lock[path], flags);
5289 }
5290 }
5291}
5292
Kevin Barnett061ef062017-05-03 18:53:05 -05005293static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
Mahesh Rajashekhara1e467312018-12-07 16:29:24 -06005294 struct pqi_scsi_dev *device, unsigned long timeout_secs)
Kevin Barnett061ef062017-05-03 18:53:05 -05005295{
Mahesh Rajashekhara1e467312018-12-07 16:29:24 -06005296 unsigned long timeout;
5297
5298 timeout = (timeout_secs * HZ) + jiffies;
5299
Kevin Barnett061ef062017-05-03 18:53:05 -05005300 while (atomic_read(&device->scsi_cmds_outstanding)) {
5301 pqi_check_ctrl_health(ctrl_info);
5302 if (pqi_ctrl_offline(ctrl_info))
5303 return -ENXIO;
Mahesh Rajashekhara1e467312018-12-07 16:29:24 -06005304 if (timeout_secs != NO_TIMEOUT) {
5305 if (time_after(jiffies, timeout)) {
5306 dev_err(&ctrl_info->pci_dev->dev,
5307 "timed out waiting for pending IO\n");
5308 return -ETIMEDOUT;
5309 }
5310 }
Kevin Barnett061ef062017-05-03 18:53:05 -05005311 usleep_range(1000, 2000);
5312 }
5313
5314 return 0;
5315}
5316
5317static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info)
5318{
5319 bool io_pending;
5320 unsigned long flags;
5321 struct pqi_scsi_dev *device;
5322
5323 while (1) {
5324 io_pending = false;
5325
5326 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5327 list_for_each_entry(device, &ctrl_info->scsi_device_list,
5328 scsi_device_list_entry) {
5329 if (atomic_read(&device->scsi_cmds_outstanding)) {
5330 io_pending = true;
5331 break;
5332 }
5333 }
5334 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5335 flags);
5336
5337 if (!io_pending)
5338 break;
5339
5340 pqi_check_ctrl_health(ctrl_info);
5341 if (pqi_ctrl_offline(ctrl_info))
5342 return -ENXIO;
5343
5344 usleep_range(1000, 2000);
5345 }
5346
5347 return 0;
5348}
5349
Kevin Barnett14bb2152016-08-31 14:54:35 -05005350static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
Kevin Barnett6c223762016-06-27 16:41:00 -05005351 void *context)
5352{
5353 struct completion *waiting = context;
5354
5355 complete(waiting);
5356}
5357
Kevin Barnett14bb2152016-08-31 14:54:35 -05005358#define PQI_LUN_RESET_TIMEOUT_SECS 10
5359
5360static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
5361 struct pqi_scsi_dev *device, struct completion *wait)
5362{
5363 int rc;
Kevin Barnett14bb2152016-08-31 14:54:35 -05005364
5365 while (1) {
5366 if (wait_for_completion_io_timeout(wait,
5367 PQI_LUN_RESET_TIMEOUT_SECS * HZ)) {
5368 rc = 0;
5369 break;
5370 }
5371
5372 pqi_check_ctrl_health(ctrl_info);
5373 if (pqi_ctrl_offline(ctrl_info)) {
Kevin Barnett4e8415e2017-05-03 18:54:18 -05005374 rc = -ENXIO;
Kevin Barnett14bb2152016-08-31 14:54:35 -05005375 break;
5376 }
Kevin Barnett14bb2152016-08-31 14:54:35 -05005377 }
5378
5379 return rc;
5380}
5381
5382static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
Kevin Barnett6c223762016-06-27 16:41:00 -05005383 struct pqi_scsi_dev *device)
5384{
5385 int rc;
5386 struct pqi_io_request *io_request;
5387 DECLARE_COMPLETION_ONSTACK(wait);
5388 struct pqi_task_management_request *request;
5389
Kevin Barnett6c223762016-06-27 16:41:00 -05005390 io_request = pqi_alloc_io_request(ctrl_info);
Kevin Barnett14bb2152016-08-31 14:54:35 -05005391 io_request->io_complete_callback = pqi_lun_reset_complete;
Kevin Barnett6c223762016-06-27 16:41:00 -05005392 io_request->context = &wait;
5393
5394 request = io_request->iu;
5395 memset(request, 0, sizeof(*request));
5396
5397 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
5398 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
5399 &request->header.iu_length);
5400 put_unaligned_le16(io_request->index, &request->request_id);
5401 memcpy(request->lun_number, device->scsi3addr,
5402 sizeof(request->lun_number));
5403 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
5404
5405 pqi_start_io(ctrl_info,
5406 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
5407 io_request);
5408
Kevin Barnett14bb2152016-08-31 14:54:35 -05005409 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
5410 if (rc == 0)
Kevin Barnett6c223762016-06-27 16:41:00 -05005411 rc = io_request->status;
Kevin Barnett6c223762016-06-27 16:41:00 -05005412
5413 pqi_free_io_request(io_request);
Kevin Barnett6c223762016-06-27 16:41:00 -05005414
5415 return rc;
5416}
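
/*
 * pqi_lun_reset() above issues a SOP task management IU
 * (SOP_TASK_MANAGEMENT_LUN_RESET) addressed by the device's SCSI3
 * address, always on the RAID path of the default queue group.
 * Completion is signalled through pqi_lun_reset_complete(); the wait
 * loop wakes every PQI_LUN_RESET_TIMEOUT_SECS to re-check controller
 * health so that an offline controller ends the wait with -ENXIO instead
 * of blocking indefinitely.
 */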
5417
Mahesh Rajashekhara34063842018-12-07 16:28:16 -06005418#define PQI_LUN_RESET_RETRIES 3
5419#define PQI_LUN_RESET_RETRY_INTERVAL_MSECS 10000
Kevin Barnett6c223762016-06-27 16:41:00 -05005420/* Performs a reset at the LUN level. */
5421
5422static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
5423 struct pqi_scsi_dev *device)
5424{
5425 int rc;
Mahesh Rajashekhara34063842018-12-07 16:28:16 -06005426 unsigned int retries;
Kevin Barnett6c223762016-06-27 16:41:00 -05005427
Mahesh Rajashekhara34063842018-12-07 16:28:16 -06005428 for (retries = 0;;) {
5429 rc = pqi_lun_reset(ctrl_info, device);
5430 if (rc != -EAGAIN ||
5431 ++retries > PQI_LUN_RESET_RETRIES)
5432 break;
5433 msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
5434 }
Kevin Barnett061ef062017-05-03 18:53:05 -05005435 if (rc == 0)
Mahesh Rajashekhara1e467312018-12-07 16:29:24 -06005436 rc = pqi_device_wait_for_pending_io(ctrl_info,
5437 device, NO_TIMEOUT);
Kevin Barnett6c223762016-06-27 16:41:00 -05005438
Kevin Barnett14bb2152016-08-31 14:54:35 -05005439 return rc == 0 ? SUCCESS : FAILED;
Kevin Barnett6c223762016-06-27 16:41:00 -05005440}
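
/*
 * pqi_device_reset() retries the LUN reset up to PQI_LUN_RESET_RETRIES
 * times when firmware returns -EAGAIN, sleeping
 * PQI_LUN_RESET_RETRY_INTERVAL_MSECS between attempts.  On success it
 * then waits (with no timeout) for the device's outstanding command
 * count to reach zero before translating the result into the
 * SUCCESS/FAILED values expected by the SCSI error handler.
 */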
5441
5442static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
5443{
5444 int rc;
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005445 struct Scsi_Host *shost;
Kevin Barnett6c223762016-06-27 16:41:00 -05005446 struct pqi_ctrl_info *ctrl_info;
5447 struct pqi_scsi_dev *device;
5448
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005449 shost = scmd->device->host;
5450 ctrl_info = shost_to_hba(shost);
Kevin Barnett6c223762016-06-27 16:41:00 -05005451 device = scmd->device->hostdata;
5452
5453 dev_err(&ctrl_info->pci_dev->dev,
5454 "resetting scsi %d:%d:%d:%d\n",
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005455 shost->host_no, device->bus, device->target, device->lun);
Kevin Barnett6c223762016-06-27 16:41:00 -05005456
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005457 pqi_check_ctrl_health(ctrl_info);
5458 if (pqi_ctrl_offline(ctrl_info)) {
5459 rc = FAILED;
5460 goto out;
5461 }
Kevin Barnett6c223762016-06-27 16:41:00 -05005462
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005463 mutex_lock(&ctrl_info->lun_reset_mutex);
5464
5465 pqi_ctrl_block_requests(ctrl_info);
5466 pqi_ctrl_wait_until_quiesced(ctrl_info);
5467 pqi_fail_io_queued_for_device(ctrl_info, device);
5468 rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
5469 pqi_device_reset_start(device);
5470 pqi_ctrl_unblock_requests(ctrl_info);
5471
5472 if (rc)
5473 rc = FAILED;
5474 else
5475 rc = pqi_device_reset(ctrl_info, device);
5476
5477 pqi_device_reset_done(device);
5478
5479 mutex_unlock(&ctrl_info->lun_reset_mutex);
5480
5481out:
Kevin Barnett6c223762016-06-27 16:41:00 -05005482 dev_err(&ctrl_info->pci_dev->dev,
5483 "reset of scsi %d:%d:%d:%d: %s\n",
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005484 shost->host_no, device->bus, device->target, device->lun,
Kevin Barnett6c223762016-06-27 16:41:00 -05005485 rc == SUCCESS ? "SUCCESS" : "FAILED");
5486
5487 return rc;
5488}
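
/*
 * Device reset error handler sequencing (above): resets are serialized
 * on lun_reset_mutex.  New requests are blocked and the controller is
 * quiesced, any I/O still queued inside the driver for this device is
 * completed with DID_RESET, and the inbound queues are drained before
 * the LUN reset itself is issued; requests are unblocked again before
 * the reset is sent so the rest of the host keeps working.
 */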
5489
5490static int pqi_slave_alloc(struct scsi_device *sdev)
5491{
5492 struct pqi_scsi_dev *device;
5493 unsigned long flags;
5494 struct pqi_ctrl_info *ctrl_info;
5495 struct scsi_target *starget;
5496 struct sas_rphy *rphy;
5497
5498 ctrl_info = shost_to_hba(sdev->host);
5499
5500 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5501
5502 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
5503 starget = scsi_target(sdev);
5504 rphy = target_to_rphy(starget);
5505 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
5506 if (device) {
5507 device->target = sdev_id(sdev);
5508 device->lun = sdev->lun;
5509 device->target_lun_valid = true;
5510 }
5511 } else {
5512 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
5513 sdev_id(sdev), sdev->lun);
5514 }
5515
Kevin Barnett94086f52017-05-03 18:54:31 -05005516 if (device) {
Kevin Barnett6c223762016-06-27 16:41:00 -05005517 sdev->hostdata = device;
5518 device->sdev = sdev;
5519 if (device->queue_depth) {
5520 device->advertised_queue_depth = device->queue_depth;
5521 scsi_change_queue_depth(sdev,
5522 device->advertised_queue_depth);
5523 }
Dave Carrollb6e2ef62018-12-07 16:28:23 -06005524 if (pqi_is_logical_device(device))
5525 pqi_disable_write_same(sdev);
Dave Carroll2b447f82018-12-07 16:29:05 -06005526 else
5527 sdev->allow_restart = 1;
Kevin Barnett6c223762016-06-27 16:41:00 -05005528 }
5529
5530 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5531
5532 return 0;
5533}
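
/*
 * pqi_slave_alloc() links the midlayer scsi_device to the driver's
 * pqi_scsi_dev under scsi_device_list_lock: physical devices (on
 * PQI_PHYSICAL_DEVICE_BUS) are matched through their SAS rphy and have
 * their target/lun recorded, while logical devices are matched by
 * bus/target/lun.  The device's advertised queue depth is applied,
 * WRITE SAME is disabled for logical volumes, and physical devices get
 * sdev->allow_restart set.
 */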
5534
Christoph Hellwig52198222016-11-01 08:12:49 -06005535static int pqi_map_queues(struct Scsi_Host *shost)
5536{
5537 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
5538
Keith Buschf23f5bec2018-03-27 09:39:06 -06005539 return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev, 0);
Christoph Hellwig52198222016-11-01 08:12:49 -06005540}
5541
Kevin Barnett6c223762016-06-27 16:41:00 -05005542static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
5543 void __user *arg)
5544{
5545 struct pci_dev *pci_dev;
5546 u32 subsystem_vendor;
5547 u32 subsystem_device;
5548 cciss_pci_info_struct pciinfo;
5549
5550 if (!arg)
5551 return -EINVAL;
5552
5553 pci_dev = ctrl_info->pci_dev;
5554
5555 pciinfo.domain = pci_domain_nr(pci_dev->bus);
5556 pciinfo.bus = pci_dev->bus->number;
5557 pciinfo.dev_fn = pci_dev->devfn;
5558 subsystem_vendor = pci_dev->subsystem_vendor;
5559 subsystem_device = pci_dev->subsystem_device;
5560 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
5561 subsystem_vendor;
5562
5563 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
5564 return -EFAULT;
5565
5566 return 0;
5567}
5568
5569static int pqi_getdrivver_ioctl(void __user *arg)
5570{
5571 u32 version;
5572
5573 if (!arg)
5574 return -EINVAL;
5575
5576 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
5577 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
5578
5579 if (copy_to_user(arg, &version, sizeof(version)))
5580 return -EFAULT;
5581
5582 return 0;
5583}
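
/*
 * CCISS_GETDRIVVER packs the driver version into a single 32-bit value:
 * major in bits 31-28, minor in bits 27-24, release in bits 23-16 and
 * revision in the low 16 bits, so a driver versioned a.b.c-d is reported
 * as (a << 28) | (b << 24) | (c << 16) | d.
 */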
5584
5585struct ciss_error_info {
5586 u8 scsi_status;
5587 int command_status;
5588 size_t sense_data_length;
5589};
5590
5591static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
5592 struct ciss_error_info *ciss_error_info)
5593{
5594 int ciss_cmd_status;
5595 size_t sense_data_length;
5596
5597 switch (pqi_error_info->data_out_result) {
5598 case PQI_DATA_IN_OUT_GOOD:
5599 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
5600 break;
5601 case PQI_DATA_IN_OUT_UNDERFLOW:
5602 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
5603 break;
5604 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
5605 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
5606 break;
5607 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
5608 case PQI_DATA_IN_OUT_BUFFER_ERROR:
5609 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
5610 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
5611 case PQI_DATA_IN_OUT_ERROR:
5612 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
5613 break;
5614 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
5615 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
5616 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
5617 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
5618 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
5619 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
5620 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
5621 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
5622 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
5623 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
5624 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
5625 break;
5626 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
5627 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
5628 break;
5629 case PQI_DATA_IN_OUT_ABORTED:
5630 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
5631 break;
5632 case PQI_DATA_IN_OUT_TIMEOUT:
5633 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
5634 break;
5635 default:
5636 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
5637 break;
5638 }
5639
5640 sense_data_length =
5641 get_unaligned_le16(&pqi_error_info->sense_data_length);
5642 if (sense_data_length == 0)
5643 sense_data_length =
5644 get_unaligned_le16(&pqi_error_info->response_data_length);
5645 if (sense_data_length)
5646 if (sense_data_length > sizeof(pqi_error_info->data))
5647 sense_data_length = sizeof(pqi_error_info->data);
5648
5649 ciss_error_info->scsi_status = pqi_error_info->status;
5650 ciss_error_info->command_status = ciss_cmd_status;
5651 ciss_error_info->sense_data_length = sense_data_length;
5652}
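
/*
 * pqi_error_info_to_ciss() collapses the PQI data_out_result codes into
 * the coarser CISS command-status values used by the cciss ioctls, and
 * picks a sense length: sense_data_length if it is non-zero, otherwise
 * response_data_length, clamped to the size of the embedded data buffer.
 */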
5653
5654static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
5655{
5656 int rc;
5657 char *kernel_buffer = NULL;
5658 u16 iu_length;
5659 size_t sense_data_length;
5660 IOCTL_Command_struct iocommand;
5661 struct pqi_raid_path_request request;
5662 struct pqi_raid_error_info pqi_error_info;
5663 struct ciss_error_info ciss_error_info;
5664
5665 if (pqi_ctrl_offline(ctrl_info))
5666 return -ENXIO;
5667 if (!arg)
5668 return -EINVAL;
5669 if (!capable(CAP_SYS_RAWIO))
5670 return -EPERM;
5671 if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
5672 return -EFAULT;
5673 if (iocommand.buf_size < 1 &&
5674 iocommand.Request.Type.Direction != XFER_NONE)
5675 return -EINVAL;
5676 if (iocommand.Request.CDBLen > sizeof(request.cdb))
5677 return -EINVAL;
5678 if (iocommand.Request.Type.Type != TYPE_CMD)
5679 return -EINVAL;
5680
5681 switch (iocommand.Request.Type.Direction) {
5682 case XFER_NONE:
5683 case XFER_WRITE:
5684 case XFER_READ:
Kevin Barnett41555d52017-08-10 13:46:51 -05005685 case XFER_READ | XFER_WRITE:
Kevin Barnett6c223762016-06-27 16:41:00 -05005686 break;
5687 default:
5688 return -EINVAL;
5689 }
5690
5691 if (iocommand.buf_size > 0) {
5692 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
5693 if (!kernel_buffer)
5694 return -ENOMEM;
5695 if (iocommand.Request.Type.Direction & XFER_WRITE) {
5696 if (copy_from_user(kernel_buffer, iocommand.buf,
5697 iocommand.buf_size)) {
5698 rc = -EFAULT;
5699 goto out;
5700 }
5701 } else {
5702 memset(kernel_buffer, 0, iocommand.buf_size);
5703 }
5704 }
5705
5706 memset(&request, 0, sizeof(request));
5707
5708 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
5709 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
5710 PQI_REQUEST_HEADER_LENGTH;
5711 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
5712 sizeof(request.lun_number));
5713 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
5714 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
5715
5716 switch (iocommand.Request.Type.Direction) {
5717 case XFER_NONE:
5718 request.data_direction = SOP_NO_DIRECTION_FLAG;
5719 break;
5720 case XFER_WRITE:
5721 request.data_direction = SOP_WRITE_FLAG;
5722 break;
5723 case XFER_READ:
5724 request.data_direction = SOP_READ_FLAG;
5725 break;
Kevin Barnett41555d52017-08-10 13:46:51 -05005726 case XFER_READ | XFER_WRITE:
5727 request.data_direction = SOP_BIDIRECTIONAL;
5728 break;
Kevin Barnett6c223762016-06-27 16:41:00 -05005729 }
5730
5731 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5732
5733 if (iocommand.buf_size > 0) {
5734 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
5735
5736 rc = pqi_map_single(ctrl_info->pci_dev,
5737 &request.sg_descriptors[0], kernel_buffer,
Christoph Hellwig6917a9c2018-10-11 09:47:59 +02005738 iocommand.buf_size, DMA_BIDIRECTIONAL);
Kevin Barnett6c223762016-06-27 16:41:00 -05005739 if (rc)
5740 goto out;
5741
5742 iu_length += sizeof(request.sg_descriptors[0]);
5743 }
5744
5745 put_unaligned_le16(iu_length, &request.header.iu_length);
5746
5747 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
5748 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
5749
5750 if (iocommand.buf_size > 0)
5751 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
Christoph Hellwig6917a9c2018-10-11 09:47:59 +02005752 DMA_BIDIRECTIONAL);
Kevin Barnett6c223762016-06-27 16:41:00 -05005753
5754 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
5755
5756 if (rc == 0) {
5757 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
5758 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
5759 iocommand.error_info.CommandStatus =
5760 ciss_error_info.command_status;
5761 sense_data_length = ciss_error_info.sense_data_length;
5762 if (sense_data_length) {
5763 if (sense_data_length >
5764 sizeof(iocommand.error_info.SenseInfo))
5765 sense_data_length =
5766 sizeof(iocommand.error_info.SenseInfo);
5767 memcpy(iocommand.error_info.SenseInfo,
5768 pqi_error_info.data, sense_data_length);
5769 iocommand.error_info.SenseLen = sense_data_length;
5770 }
5771 }
5772
5773 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
5774 rc = -EFAULT;
5775 goto out;
5776 }
5777
5778 if (rc == 0 && iocommand.buf_size > 0 &&
5779 (iocommand.Request.Type.Direction & XFER_READ)) {
5780 if (copy_to_user(iocommand.buf, kernel_buffer,
5781 iocommand.buf_size)) {
5782 rc = -EFAULT;
5783 }
5784 }
5785
5786out:
5787 kfree(kernel_buffer);
5788
5789 return rc;
5790}
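
/*
 * CCISS_PASSTHRU flow (above): after validating the request (raw I/O
 * capability, CDB length, transfer direction), the user buffer is copied
 * into a kernel bounce buffer and described by a single SG descriptor
 * mapped DMA_BIDIRECTIONAL.  The command is built as a normal RAID path
 * IU and submitted synchronously; on completion the PQI error
 * information is translated to CISS form, and the sense data and (for
 * reads) the data buffer are copied back to user space.
 */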
5791
5792static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
5793{
5794 int rc;
5795 struct pqi_ctrl_info *ctrl_info;
5796
5797 ctrl_info = shost_to_hba(sdev->host);
5798
5799 switch (cmd) {
5800 case CCISS_DEREGDISK:
5801 case CCISS_REGNEWDISK:
5802 case CCISS_REGNEWD:
5803 rc = pqi_scan_scsi_devices(ctrl_info);
5804 break;
5805 case CCISS_GETPCIINFO:
5806 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
5807 break;
5808 case CCISS_GETDRIVVER:
5809 rc = pqi_getdrivver_ioctl(arg);
5810 break;
5811 case CCISS_PASSTHRU:
5812 rc = pqi_passthru_ioctl(ctrl_info, arg);
5813 break;
5814 default:
5815 rc = -EINVAL;
5816 break;
5817 }
5818
5819 return rc;
5820}
5821
5822static ssize_t pqi_version_show(struct device *dev,
5823 struct device_attribute *attr, char *buffer)
5824{
5825 ssize_t count = 0;
5826 struct Scsi_Host *shost;
5827 struct pqi_ctrl_info *ctrl_info;
5828
5829 shost = class_to_shost(dev);
5830 ctrl_info = shost_to_hba(shost);
5831
5832 count += snprintf(buffer + count, PAGE_SIZE - count,
5833 " driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP);
5834
5835 count += snprintf(buffer + count, PAGE_SIZE - count,
5836 "firmware: %s\n", ctrl_info->firmware_version);
5837
5838 return count;
5839}
5840
5841static ssize_t pqi_host_rescan_store(struct device *dev,
5842 struct device_attribute *attr, const char *buffer, size_t count)
5843{
5844 struct Scsi_Host *shost = class_to_shost(dev);
5845
5846 pqi_scan_start(shost);
5847
5848 return count;
5849}
5850
Kevin Barnett3c509762017-05-03 18:54:37 -05005851static ssize_t pqi_lockup_action_show(struct device *dev,
5852 struct device_attribute *attr, char *buffer)
5853{
5854 int count = 0;
5855 unsigned int i;
5856
5857 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
5858 if (pqi_lockup_actions[i].action == pqi_lockup_action)
5859 count += snprintf(buffer + count, PAGE_SIZE - count,
5860 "[%s] ", pqi_lockup_actions[i].name);
5861 else
5862 count += snprintf(buffer + count, PAGE_SIZE - count,
5863 "%s ", pqi_lockup_actions[i].name);
5864 }
5865
5866 count += snprintf(buffer + count, PAGE_SIZE - count, "\n");
5867
5868 return count;
5869}
5870
5871static ssize_t pqi_lockup_action_store(struct device *dev,
5872 struct device_attribute *attr, const char *buffer, size_t count)
5873{
5874 unsigned int i;
5875 char *action_name;
5876 char action_name_buffer[32];
5877
5878 strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer));
5879 action_name = strstrip(action_name_buffer);
5880
5881 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
5882 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) {
5883 pqi_lockup_action = pqi_lockup_actions[i].action;
5884 return count;
5885 }
5886 }
5887
5888 return -EINVAL;
5889}
5890
Kevin Barnettcbe0c7b2017-05-03 18:53:48 -05005891static DEVICE_ATTR(version, 0444, pqi_version_show, NULL);
5892static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
Kevin Barnett3c509762017-05-03 18:54:37 -05005893static DEVICE_ATTR(lockup_action, 0644,
5894 pqi_lockup_action_show, pqi_lockup_action_store);
Kevin Barnett6c223762016-06-27 16:41:00 -05005895
5896static struct device_attribute *pqi_shost_attrs[] = {
5897 &dev_attr_version,
5898 &dev_attr_rescan,
Kevin Barnett3c509762017-05-03 18:54:37 -05005899 &dev_attr_lockup_action,
Kevin Barnett6c223762016-06-27 16:41:00 -05005900 NULL
5901};
5902
Dave Carrollcd128242018-12-07 16:28:47 -06005903static ssize_t pqi_unique_id_show(struct device *dev,
5904 struct device_attribute *attr, char *buffer)
5905{
5906 struct pqi_ctrl_info *ctrl_info;
5907 struct scsi_device *sdev;
5908 struct pqi_scsi_dev *device;
5909 unsigned long flags;
5910 unsigned char uid[16];
5911
5912 sdev = to_scsi_device(dev);
5913 ctrl_info = shost_to_hba(sdev->host);
5914
5915 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5916
5917 device = sdev->hostdata;
5918 if (!device) {
5919 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5920 flags);
5921 return -ENODEV;
5922 }
5923 memcpy(uid, device->unique_id, sizeof(uid));
5924
5925 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5926
Murthy Bhat5995b232018-12-07 16:28:59 -06005927 return snprintf(buffer, PAGE_SIZE,
5928 "%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n",
5929 uid[0], uid[1], uid[2], uid[3],
5930 uid[4], uid[5], uid[6], uid[7],
5931 uid[8], uid[9], uid[10], uid[11],
5932 uid[12], uid[13], uid[14], uid[15]);
Dave Carrollcd128242018-12-07 16:28:47 -06005933}
5934
5935static ssize_t pqi_lunid_show(struct device *dev,
5936 struct device_attribute *attr, char *buffer)
5937{
5938 struct pqi_ctrl_info *ctrl_info;
5939 struct scsi_device *sdev;
5940 struct pqi_scsi_dev *device;
5941 unsigned long flags;
5942 u8 lunid[8];
5943
5944 sdev = to_scsi_device(dev);
5945 ctrl_info = shost_to_hba(sdev->host);
5946
5947 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5948
5949 device = sdev->hostdata;
5950 if (!device) {
5951 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5952 flags);
5953 return -ENODEV;
5954 }
5955 memcpy(lunid, device->scsi3addr, sizeof(lunid));
5956
5957 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5958
5959 return snprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
5960}
5961
5962#define MAX_PATHS 8
5963static ssize_t pqi_path_info_show(struct device *dev,
5964 struct device_attribute *attr, char *buf)
5965{
5966 struct pqi_ctrl_info *ctrl_info;
5967 struct scsi_device *sdev;
5968 struct pqi_scsi_dev *device;
5969 unsigned long flags;
5970 int i;
5971 int output_len = 0;
5972 u8 box;
5973 u8 bay;
5974 u8 path_map_index = 0;
5975 char *active;
5976 unsigned char phys_connector[2];
5977
5978 sdev = to_scsi_device(dev);
5979 ctrl_info = shost_to_hba(sdev->host);
5980
5981 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5982
5983 device = sdev->hostdata;
5984 if (!device) {
5985 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5986 flags);
5987 return -ENODEV;
5988 }
5989
5990 bay = device->bay;
5991 for (i = 0; i < MAX_PATHS; i++) {
5992 path_map_index = 1<<i;
5993 if (i == device->active_path_index)
5994 active = "Active";
5995 else if (device->path_map & path_map_index)
5996 active = "Inactive";
5997 else
5998 continue;
5999
6000 output_len += scnprintf(buf + output_len,
6001 PAGE_SIZE - output_len,
6002 "[%d:%d:%d:%d] %20.20s ",
6003 ctrl_info->scsi_host->host_no,
6004 device->bus, device->target,
6005 device->lun,
6006 scsi_device_type(device->devtype));
6007
6008 if (device->devtype == TYPE_RAID ||
6009 pqi_is_logical_device(device))
6010 goto end_buffer;
6011
6012 memcpy(&phys_connector, &device->phys_connector[i],
6013 sizeof(phys_connector));
6014 if (phys_connector[0] < '0')
6015 phys_connector[0] = '0';
6016 if (phys_connector[1] < '0')
6017 phys_connector[1] = '0';
6018
6019 output_len += scnprintf(buf + output_len,
6020 PAGE_SIZE - output_len,
6021 "PORT: %.2s ", phys_connector);
6022
6023 box = device->box[i];
6024 if (box != 0 && box != 0xFF)
6025 output_len += scnprintf(buf + output_len,
6026 PAGE_SIZE - output_len,
6027 "BOX: %hhu ", box);
6028
6029 if ((device->devtype == TYPE_DISK ||
6030 device->devtype == TYPE_ZBC) &&
6031 pqi_expose_device(device))
6032 output_len += scnprintf(buf + output_len,
6033 PAGE_SIZE - output_len,
6034 "BAY: %hhu ", bay);
6035
6036end_buffer:
6037 output_len += scnprintf(buf + output_len,
6038 PAGE_SIZE - output_len,
6039 "%s\n", active);
6040 }
6041
6042 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6043 return output_len;
6044}
 6045
Kevin Barnett6c223762016-06-27 16:41:00 -05006047static ssize_t pqi_sas_address_show(struct device *dev,
6048 struct device_attribute *attr, char *buffer)
6049{
6050 struct pqi_ctrl_info *ctrl_info;
6051 struct scsi_device *sdev;
6052 struct pqi_scsi_dev *device;
6053 unsigned long flags;
6054 u64 sas_address;
6055
6056 sdev = to_scsi_device(dev);
6057 ctrl_info = shost_to_hba(sdev->host);
6058
6059 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6060
6061 device = sdev->hostdata;
6062 if (pqi_is_logical_device(device)) {
6063 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
6064 flags);
6065 return -ENODEV;
6066 }
6067 sas_address = device->sas_address;
6068
6069 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6070
6071 return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
6072}
6073
6074static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
6075 struct device_attribute *attr, char *buffer)
6076{
6077 struct pqi_ctrl_info *ctrl_info;
6078 struct scsi_device *sdev;
6079 struct pqi_scsi_dev *device;
6080 unsigned long flags;
6081
6082 sdev = to_scsi_device(dev);
6083 ctrl_info = shost_to_hba(sdev->host);
6084
6085 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6086
6087 device = sdev->hostdata;
Kevin Barnett588a63fe2017-05-03 18:55:25 -05006088 buffer[0] = device->raid_bypass_enabled ? '1' : '0';
Kevin Barnett6c223762016-06-27 16:41:00 -05006089 buffer[1] = '\n';
6090 buffer[2] = '\0';
6091
6092 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6093
6094 return 2;
6095}
6096
Kevin Barnetta9f93392017-05-03 18:55:31 -05006097static ssize_t pqi_raid_level_show(struct device *dev,
6098 struct device_attribute *attr, char *buffer)
6099{
6100 struct pqi_ctrl_info *ctrl_info;
6101 struct scsi_device *sdev;
6102 struct pqi_scsi_dev *device;
6103 unsigned long flags;
6104 char *raid_level;
6105
6106 sdev = to_scsi_device(dev);
6107 ctrl_info = shost_to_hba(sdev->host);
6108
6109 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6110
6111 device = sdev->hostdata;
6112
6113 if (pqi_is_logical_device(device))
6114 raid_level = pqi_raid_level_to_string(device->raid_level);
6115 else
6116 raid_level = "N/A";
6117
6118 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6119
6120 return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
6121}
6122
Dave Carrollcd128242018-12-07 16:28:47 -06006123static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
6124static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
6125static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
Kevin Barnettcbe0c7b2017-05-03 18:53:48 -05006126static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
6127static DEVICE_ATTR(ssd_smart_path_enabled, 0444,
Kevin Barnett6c223762016-06-27 16:41:00 -05006128 pqi_ssd_smart_path_enabled_show, NULL);
Kevin Barnetta9f93392017-05-03 18:55:31 -05006129static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
Kevin Barnett6c223762016-06-27 16:41:00 -05006130
6131static struct device_attribute *pqi_sdev_attrs[] = {
Dave Carrollcd128242018-12-07 16:28:47 -06006132 &dev_attr_lunid,
6133 &dev_attr_unique_id,
6134 &dev_attr_path_info,
Kevin Barnett6c223762016-06-27 16:41:00 -05006135 &dev_attr_sas_address,
6136 &dev_attr_ssd_smart_path_enabled,
Kevin Barnetta9f93392017-05-03 18:55:31 -05006137 &dev_attr_raid_level,
Kevin Barnett6c223762016-06-27 16:41:00 -05006138 NULL
6139};
6140
6141static struct scsi_host_template pqi_driver_template = {
6142 .module = THIS_MODULE,
6143 .name = DRIVER_NAME_SHORT,
6144 .proc_name = DRIVER_NAME_SHORT,
6145 .queuecommand = pqi_scsi_queue_command,
6146 .scan_start = pqi_scan_start,
6147 .scan_finished = pqi_scan_finished,
6148 .this_id = -1,
Kevin Barnett6c223762016-06-27 16:41:00 -05006149 .eh_device_reset_handler = pqi_eh_device_reset_handler,
6150 .ioctl = pqi_ioctl,
6151 .slave_alloc = pqi_slave_alloc,
Christoph Hellwig52198222016-11-01 08:12:49 -06006152 .map_queues = pqi_map_queues,
Kevin Barnett6c223762016-06-27 16:41:00 -05006153 .sdev_attrs = pqi_sdev_attrs,
6154 .shost_attrs = pqi_shost_attrs,
6155};
6156
6157static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
6158{
6159 int rc;
6160 struct Scsi_Host *shost;
6161
6162 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
6163 if (!shost) {
6164 dev_err(&ctrl_info->pci_dev->dev,
6165 "scsi_host_alloc failed for controller %u\n",
6166 ctrl_info->ctrl_id);
6167 return -ENOMEM;
6168 }
6169
6170 shost->io_port = 0;
6171 shost->n_io_port = 0;
6172 shost->this_id = -1;
6173 shost->max_channel = PQI_MAX_BUS;
6174 shost->max_cmd_len = MAX_COMMAND_SIZE;
6175 shost->max_lun = ~0;
6176 shost->max_id = ~0;
6177 shost->max_sectors = ctrl_info->max_sectors;
6178 shost->can_queue = ctrl_info->scsi_ml_can_queue;
6179 shost->cmd_per_lun = shost->can_queue;
6180 shost->sg_tablesize = ctrl_info->sg_tablesize;
6181 shost->transportt = pqi_sas_transport_template;
Christoph Hellwig52198222016-11-01 08:12:49 -06006182 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
Kevin Barnett6c223762016-06-27 16:41:00 -05006183 shost->unique_id = shost->irq;
6184 shost->nr_hw_queues = ctrl_info->num_queue_groups;
6185 shost->hostdata[0] = (unsigned long)ctrl_info;
6186
6187 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
6188 if (rc) {
6189 dev_err(&ctrl_info->pci_dev->dev,
6190 "scsi_add_host failed for controller %u\n",
6191 ctrl_info->ctrl_id);
6192 goto free_host;
6193 }
6194
6195 rc = pqi_add_sas_host(shost, ctrl_info);
6196 if (rc) {
6197 dev_err(&ctrl_info->pci_dev->dev,
6198 "add SAS host failed for controller %u\n",
6199 ctrl_info->ctrl_id);
6200 goto remove_host;
6201 }
6202
6203 ctrl_info->scsi_host = shost;
6204
6205 return 0;
6206
6207remove_host:
6208 scsi_remove_host(shost);
6209free_host:
6210 scsi_host_put(shost);
6211
6212 return rc;
6213}
6214
6215static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
6216{
6217 struct Scsi_Host *shost;
6218
6219 pqi_delete_sas_host(ctrl_info);
6220
6221 shost = ctrl_info->scsi_host;
6222 if (!shost)
6223 return;
6224
6225 scsi_remove_host(shost);
6226 scsi_host_put(shost);
6227}
6228
Kevin Barnett336b6812017-08-10 13:46:39 -05006229static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
6230{
6231 int rc = 0;
6232 struct pqi_device_registers __iomem *pqi_registers;
6233 unsigned long timeout;
6234 unsigned int timeout_msecs;
6235 union pqi_reset_register reset_reg;
Kevin Barnett6c223762016-06-27 16:41:00 -05006236
Kevin Barnett336b6812017-08-10 13:46:39 -05006237 pqi_registers = ctrl_info->pqi_registers;
6238 timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100;
6239 timeout = msecs_to_jiffies(timeout_msecs) + jiffies;
6240
6241 while (1) {
6242 msleep(PQI_RESET_POLL_INTERVAL_MSECS);
6243 reset_reg.all_bits = readl(&pqi_registers->device_reset);
6244 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
6245 break;
6246 pqi_check_ctrl_health(ctrl_info);
6247 if (pqi_ctrl_offline(ctrl_info)) {
6248 rc = -ENXIO;
6249 break;
6250 }
6251 if (time_after(jiffies, timeout)) {
6252 rc = -ETIMEDOUT;
6253 break;
6254 }
6255 }
6256
6257 return rc;
6258}
Kevin Barnett6c223762016-06-27 16:41:00 -05006259
6260static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
6261{
6262 int rc;
Kevin Barnett336b6812017-08-10 13:46:39 -05006263 union pqi_reset_register reset_reg;
Kevin Barnett6c223762016-06-27 16:41:00 -05006264
Kevin Barnett336b6812017-08-10 13:46:39 -05006265 if (ctrl_info->pqi_reset_quiesce_supported) {
6266 rc = sis_pqi_reset_quiesce(ctrl_info);
6267 if (rc) {
6268 dev_err(&ctrl_info->pci_dev->dev,
6269 "PQI reset failed during quiesce with error %d\n",
6270 rc);
6271 return rc;
6272 }
6273 }
Kevin Barnett6c223762016-06-27 16:41:00 -05006274
Kevin Barnett336b6812017-08-10 13:46:39 -05006275 reset_reg.all_bits = 0;
6276 reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
6277 reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
Kevin Barnett6c223762016-06-27 16:41:00 -05006278
Kevin Barnett336b6812017-08-10 13:46:39 -05006279 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset);
6280
6281 rc = pqi_wait_for_pqi_reset_completion(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05006282 if (rc)
6283 dev_err(&ctrl_info->pci_dev->dev,
Kevin Barnett336b6812017-08-10 13:46:39 -05006284 "PQI reset failed with error %d\n", rc);
Kevin Barnett6c223762016-06-27 16:41:00 -05006285
6286 return rc;
6287}
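
/*
 * pqi_reset() sequencing: if the firmware advertises
 * pqi_reset_quiesce_supported, the SIS quiesce is performed first.  A
 * hard reset is then requested by writing PQI_RESET_TYPE_HARD_RESET /
 * PQI_RESET_ACTION_RESET to the device_reset register, and
 * pqi_wait_for_pqi_reset_completion() polls that register every
 * PQI_RESET_POLL_INTERVAL_MSECS until the action reads back as
 * PQI_RESET_ACTION_COMPLETED, bounded by 100 ms times the controller's
 * max_reset_timeout value and by the usual controller health check.
 */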
6288
6289static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
6290{
6291 int rc;
6292 struct bmic_identify_controller *identify;
6293
6294 identify = kmalloc(sizeof(*identify), GFP_KERNEL);
6295 if (!identify)
6296 return -ENOMEM;
6297
6298 rc = pqi_identify_controller(ctrl_info, identify);
6299 if (rc)
6300 goto out;
6301
6302 memcpy(ctrl_info->firmware_version, identify->firmware_version,
6303 sizeof(identify->firmware_version));
6304 ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
6305 snprintf(ctrl_info->firmware_version +
6306 strlen(ctrl_info->firmware_version),
6307 sizeof(ctrl_info->firmware_version),
6308 "-%u", get_unaligned_le16(&identify->firmware_build_number));
6309
6310out:
6311 kfree(identify);
6312
6313 return rc;
6314}
6315
Kevin Barnettb212c252018-12-07 16:28:10 -06006316struct pqi_config_table_section_info {
6317 struct pqi_ctrl_info *ctrl_info;
6318 void *section;
6319 u32 section_offset;
6320 void __iomem *section_iomem_addr;
6321};
6322
6323static inline bool pqi_is_firmware_feature_supported(
6324 struct pqi_config_table_firmware_features *firmware_features,
6325 unsigned int bit_position)
6326{
6327 unsigned int byte_index;
6328
6329 byte_index = bit_position / BITS_PER_BYTE;
6330
6331 if (byte_index >= le16_to_cpu(firmware_features->num_elements))
6332 return false;
6333
6334 return firmware_features->features_supported[byte_index] &
6335 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
6336}
6337
6338static inline bool pqi_is_firmware_feature_enabled(
6339 struct pqi_config_table_firmware_features *firmware_features,
6340 void __iomem *firmware_features_iomem_addr,
6341 unsigned int bit_position)
6342{
6343 unsigned int byte_index;
6344 u8 __iomem *features_enabled_iomem_addr;
6345
6346 byte_index = (bit_position / BITS_PER_BYTE) +
6347 (le16_to_cpu(firmware_features->num_elements) * 2);
6348
6349 features_enabled_iomem_addr = firmware_features_iomem_addr +
6350 offsetof(struct pqi_config_table_firmware_features,
6351 features_supported) + byte_index;
6352
6353 return *((__force u8 *)features_enabled_iomem_addr) &
6354 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
6355}
6356
6357static inline void pqi_request_firmware_feature(
6358 struct pqi_config_table_firmware_features *firmware_features,
6359 unsigned int bit_position)
6360{
6361 unsigned int byte_index;
6362
6363 byte_index = (bit_position / BITS_PER_BYTE) +
6364 le16_to_cpu(firmware_features->num_elements);
6365
6366 firmware_features->features_supported[byte_index] |=
6367 (1 << (bit_position % BITS_PER_BYTE));
6368}
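
/*
 * Layout assumed by the three helpers above: the firmware features
 * section contains three back-to-back byte arrays of num_elements bytes
 * each, all addressed through features_supported[] - the supported
 * bitmap at offset 0, the host-requested bitmap at num_elements, and the
 * firmware-enabled bitmap at 2 * num_elements.  Each feature is a single
 * bit addressed as features[bit / 8] & (1 << (bit % 8)).
 */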
6369
6370static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
6371 u16 first_section, u16 last_section)
6372{
6373 struct pqi_vendor_general_request request;
6374
6375 memset(&request, 0, sizeof(request));
6376
6377 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
6378 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
6379 &request.header.iu_length);
6380 put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE,
6381 &request.function_code);
6382 put_unaligned_le16(first_section,
6383 &request.data.config_table_update.first_section);
6384 put_unaligned_le16(last_section,
6385 &request.data.config_table_update.last_section);
6386
6387 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
6388 0, NULL, NO_TIMEOUT);
6389}
6390
6391static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
6392 struct pqi_config_table_firmware_features *firmware_features,
6393 void __iomem *firmware_features_iomem_addr)
6394{
6395 void *features_requested;
6396 void __iomem *features_requested_iomem_addr;
6397
6398 features_requested = firmware_features->features_supported +
6399 le16_to_cpu(firmware_features->num_elements);
6400
6401 features_requested_iomem_addr = firmware_features_iomem_addr +
6402 (features_requested - (void *)firmware_features);
6403
6404 memcpy_toio(features_requested_iomem_addr, features_requested,
6405 le16_to_cpu(firmware_features->num_elements));
6406
6407 return pqi_config_table_update(ctrl_info,
6408 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
6409 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
6410}
6411
6412struct pqi_firmware_feature {
6413 char *feature_name;
6414 unsigned int feature_bit;
6415 bool supported;
6416 bool enabled;
6417 void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
6418 struct pqi_firmware_feature *firmware_feature);
6419};
6420
6421static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
6422 struct pqi_firmware_feature *firmware_feature)
6423{
6424 if (!firmware_feature->supported) {
6425 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n",
6426 firmware_feature->feature_name);
6427 return;
6428 }
6429
6430 if (firmware_feature->enabled) {
6431 dev_info(&ctrl_info->pci_dev->dev,
6432 "%s enabled\n", firmware_feature->feature_name);
6433 return;
6434 }
6435
6436 dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n",
6437 firmware_feature->feature_name);
6438}
6439
6440static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
6441 struct pqi_firmware_feature *firmware_feature)
6442{
6443 if (firmware_feature->feature_status)
6444 firmware_feature->feature_status(ctrl_info, firmware_feature);
6445}
6446
6447static DEFINE_MUTEX(pqi_firmware_features_mutex);
6448
6449static struct pqi_firmware_feature pqi_firmware_features[] = {
6450 {
6451 .feature_name = "Online Firmware Activation",
6452 .feature_bit = PQI_FIRMWARE_FEATURE_OFA,
6453 .feature_status = pqi_firmware_feature_status,
6454 },
6455 {
6456 .feature_name = "Serial Management Protocol",
6457 .feature_bit = PQI_FIRMWARE_FEATURE_SMP,
6458 .feature_status = pqi_firmware_feature_status,
6459 },
6460};
6461
6462static void pqi_process_firmware_features(
6463 struct pqi_config_table_section_info *section_info)
6464{
6465 int rc;
6466 struct pqi_ctrl_info *ctrl_info;
6467 struct pqi_config_table_firmware_features *firmware_features;
6468 void __iomem *firmware_features_iomem_addr;
6469 unsigned int i;
6470 unsigned int num_features_supported;
6471
6472 ctrl_info = section_info->ctrl_info;
6473 firmware_features = section_info->section;
6474 firmware_features_iomem_addr = section_info->section_iomem_addr;
6475
6476 for (i = 0, num_features_supported = 0;
6477 i < ARRAY_SIZE(pqi_firmware_features); i++) {
6478 if (pqi_is_firmware_feature_supported(firmware_features,
6479 pqi_firmware_features[i].feature_bit)) {
6480 pqi_firmware_features[i].supported = true;
6481 num_features_supported++;
6482 } else {
6483 pqi_firmware_feature_update(ctrl_info,
6484 &pqi_firmware_features[i]);
6485 }
6486 }
6487
6488 if (num_features_supported == 0)
6489 return;
6490
6491 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6492 if (!pqi_firmware_features[i].supported)
6493 continue;
6494 pqi_request_firmware_feature(firmware_features,
6495 pqi_firmware_features[i].feature_bit);
6496 }
6497
6498 rc = pqi_enable_firmware_features(ctrl_info, firmware_features,
6499 firmware_features_iomem_addr);
6500 if (rc) {
6501 dev_err(&ctrl_info->pci_dev->dev,
6502 "failed to enable firmware features in PQI configuration table\n");
6503 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6504 if (!pqi_firmware_features[i].supported)
6505 continue;
6506 pqi_firmware_feature_update(ctrl_info,
6507 &pqi_firmware_features[i]);
6508 }
6509 return;
6510 }
6511
6512 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6513 if (!pqi_firmware_features[i].supported)
6514 continue;
6515 if (pqi_is_firmware_feature_enabled(firmware_features,
6516 firmware_features_iomem_addr,
6517 pqi_firmware_features[i].feature_bit))
6518 pqi_firmware_features[i].enabled = true;
6519 pqi_firmware_feature_update(ctrl_info,
6520 &pqi_firmware_features[i]);
6521 }
6522}
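/*
 * Feature negotiation is done in three passes over pqi_firmware_features[]:
 * 1) mark the features the controller reports as supported (and log the
 *    unsupported ones), 2) set the request bit for every supported feature
 *    and push the section back to the firmware, 3) read the enabled bits
 *    back from controller memory and report the final status of each
 *    feature.
 */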
6523
6524static void pqi_init_firmware_features(void)
6525{
6526 unsigned int i;
6527
6528 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6529 pqi_firmware_features[i].supported = false;
6530 pqi_firmware_features[i].enabled = false;
6531 }
6532}
6533
6534static void pqi_process_firmware_features_section(
6535 struct pqi_config_table_section_info *section_info)
6536{
6537 mutex_lock(&pqi_firmware_features_mutex);
6538 pqi_init_firmware_features();
6539 pqi_process_firmware_features(section_info);
6540 mutex_unlock(&pqi_firmware_features_mutex);
6541}
6542
Kevin Barnett98f87662017-05-03 18:53:11 -05006543static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
6544{
6545 u32 table_length;
6546 u32 section_offset;
6547 void __iomem *table_iomem_addr;
6548 struct pqi_config_table *config_table;
6549 struct pqi_config_table_section_header *section;
Kevin Barnettb212c252018-12-07 16:28:10 -06006550 struct pqi_config_table_section_info section_info;
Kevin Barnett98f87662017-05-03 18:53:11 -05006551
6552 table_length = ctrl_info->config_table_length;
Kevin Barnettb212c252018-12-07 16:28:10 -06006553 if (table_length == 0)
6554 return 0;
Kevin Barnett98f87662017-05-03 18:53:11 -05006555
6556 config_table = kmalloc(table_length, GFP_KERNEL);
6557 if (!config_table) {
6558 dev_err(&ctrl_info->pci_dev->dev,
Kevin Barnettd87d5472017-05-03 18:54:00 -05006559 "failed to allocate memory for PQI configuration table\n");
Kevin Barnett98f87662017-05-03 18:53:11 -05006560 return -ENOMEM;
6561 }
6562
6563 /*
6564 * Copy the config table contents from I/O memory space into the
6565 * temporary buffer.
6566 */
6567 table_iomem_addr = ctrl_info->iomem_base +
6568 ctrl_info->config_table_offset;
6569 memcpy_fromio(config_table, table_iomem_addr, table_length);
6570
Kevin Barnettb212c252018-12-07 16:28:10 -06006571 section_info.ctrl_info = ctrl_info;
Kevin Barnett98f87662017-05-03 18:53:11 -05006572 section_offset =
6573 get_unaligned_le32(&config_table->first_section_offset);
6574
6575 while (section_offset) {
6576 section = (void *)config_table + section_offset;
6577
Kevin Barnettb212c252018-12-07 16:28:10 -06006578 section_info.section = section;
6579 section_info.section_offset = section_offset;
6580 section_info.section_iomem_addr =
6581 table_iomem_addr + section_offset;
6582
Kevin Barnett98f87662017-05-03 18:53:11 -05006583 switch (get_unaligned_le16(&section->section_id)) {
Kevin Barnettb212c252018-12-07 16:28:10 -06006584 case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
6585 pqi_process_firmware_features_section(&section_info);
6586 break;
Kevin Barnett98f87662017-05-03 18:53:11 -05006587 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
Kevin Barnett5a259e32017-05-03 18:55:43 -05006588 if (pqi_disable_heartbeat)
6589 dev_warn(&ctrl_info->pci_dev->dev,
6590 "heartbeat disabled by module parameter\n");
6591 else
6592 ctrl_info->heartbeat_counter =
6593 table_iomem_addr +
6594 section_offset +
6595 offsetof(
6596 struct pqi_config_table_heartbeat,
6597 heartbeat_counter);
Kevin Barnett98f87662017-05-03 18:53:11 -05006598 break;
6599 }
6600
6601 section_offset =
6602 get_unaligned_le16(&section->next_section_offset);
6603 }
6604
6605 kfree(config_table);
6606
6607 return 0;
6608}
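/*
 * The config table is copied out of controller memory once and parsed
 * from the local buffer; sections are chained through
 * next_section_offset, and an offset of 0 terminates the walk.  The
 * heartbeat counter pointer is left pointing at the live mapped copy
 * rather than the kmalloc'd buffer, presumably because the firmware
 * keeps updating the counter in place.
 */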
6609
Kevin Barnett162d7752017-05-03 18:52:46 -05006610/* Switches the controller from PQI mode back into SIS mode. */
6611
6612static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
6613{
6614 int rc;
6615
Kevin Barnett061ef062017-05-03 18:53:05 -05006616 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
Kevin Barnett162d7752017-05-03 18:52:46 -05006617 rc = pqi_reset(ctrl_info);
6618 if (rc)
6619 return rc;
Kevin Barnett4f078e22017-08-10 13:46:57 -05006620 rc = sis_reenable_sis_mode(ctrl_info);
6621 if (rc) {
6622 dev_err(&ctrl_info->pci_dev->dev,
6623 "re-enabling SIS mode failed with error %d\n", rc);
6624 return rc;
6625 }
Kevin Barnett162d7752017-05-03 18:52:46 -05006626 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
6627
6628 return 0;
6629}
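/*
 * Reverting to SIS mode: interrupts are quiesced first, then a PQI reset
 * is issued and the firmware is asked to re-enable its legacy SIS
 * interface.  pqi_save_ctrl_mode() records the new mode so that
 * pqi_force_sis_mode() can later tell which personality the controller
 * was left in.
 */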
6630
6631/*
6632 * If the controller isn't already in SIS mode, this function forces it into
6633 * SIS mode.
6634 */
6635
6636static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
Kevin Barnettff6abb72016-08-31 14:54:41 -05006637{
6638 if (!sis_is_firmware_running(ctrl_info))
6639 return -ENXIO;
6640
Kevin Barnett162d7752017-05-03 18:52:46 -05006641 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
6642 return 0;
6643
6644 if (sis_is_kernel_up(ctrl_info)) {
6645 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
6646 return 0;
Kevin Barnettff6abb72016-08-31 14:54:41 -05006647 }
6648
Kevin Barnett162d7752017-05-03 18:52:46 -05006649 return pqi_revert_to_sis_mode(ctrl_info);
Kevin Barnettff6abb72016-08-31 14:54:41 -05006650}
6651
Kevin Barnett6c223762016-06-27 16:41:00 -05006652static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
6653{
6654 int rc;
6655
Kevin Barnett162d7752017-05-03 18:52:46 -05006656 rc = pqi_force_sis_mode(ctrl_info);
6657 if (rc)
6658 return rc;
Kevin Barnett6c223762016-06-27 16:41:00 -05006659
6660 /*
6661 * Wait until the controller is ready to start accepting SIS
6662 * commands.
6663 */
6664 rc = sis_wait_for_ctrl_ready(ctrl_info);
Kevin Barnett8845fdf2017-05-03 18:53:36 -05006665 if (rc)
Kevin Barnett6c223762016-06-27 16:41:00 -05006666 return rc;
Kevin Barnett6c223762016-06-27 16:41:00 -05006667
6668 /*
6669 * Get the controller properties. This allows us to determine
6670 * whether or not it supports PQI mode.
6671 */
6672 rc = sis_get_ctrl_properties(ctrl_info);
6673 if (rc) {
6674 dev_err(&ctrl_info->pci_dev->dev,
6675 "error obtaining controller properties\n");
6676 return rc;
6677 }
6678
6679 rc = sis_get_pqi_capabilities(ctrl_info);
6680 if (rc) {
6681 dev_err(&ctrl_info->pci_dev->dev,
6682 "error obtaining controller capabilities\n");
6683 return rc;
6684 }
6685
Kevin Barnettd727a772017-05-03 18:54:25 -05006686 if (reset_devices) {
6687 if (ctrl_info->max_outstanding_requests >
6688 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
6689 ctrl_info->max_outstanding_requests =
6690 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
6691 } else {
6692 if (ctrl_info->max_outstanding_requests >
6693 PQI_MAX_OUTSTANDING_REQUESTS)
6694 ctrl_info->max_outstanding_requests =
6695 PQI_MAX_OUTSTANDING_REQUESTS;
6696 }
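	/*
	 * The outstanding request limit is clamped much lower when
	 * reset_devices is set (i.e. when running as a kdump/crash kernel),
	 * presumably to keep the driver's queue memory footprint small in
	 * that constrained environment.
	 */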
Kevin Barnett6c223762016-06-27 16:41:00 -05006697
6698 pqi_calculate_io_resources(ctrl_info);
6699
6700 rc = pqi_alloc_error_buffer(ctrl_info);
6701 if (rc) {
6702 dev_err(&ctrl_info->pci_dev->dev,
6703 "failed to allocate PQI error buffer\n");
6704 return rc;
6705 }
6706
6707 /*
6708	 * If the call to sis_init_base_struct_addr() below succeeds,
6709	 * the controller will transition from legacy SIS mode
6710	 * into PQI mode.
6711 */
6712 rc = sis_init_base_struct_addr(ctrl_info);
6713 if (rc) {
6714 dev_err(&ctrl_info->pci_dev->dev,
6715 "error initializing PQI mode\n");
6716 return rc;
6717 }
6718
6719 /* Wait for the controller to complete the SIS -> PQI transition. */
6720 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
6721 if (rc) {
6722 dev_err(&ctrl_info->pci_dev->dev,
6723 "transition to PQI mode failed\n");
6724 return rc;
6725 }
6726
6727 /* From here on, we are running in PQI mode. */
6728 ctrl_info->pqi_mode_enabled = true;
Kevin Barnettff6abb72016-08-31 14:54:41 -05006729 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
Kevin Barnett6c223762016-06-27 16:41:00 -05006730
6731 rc = pqi_alloc_admin_queues(ctrl_info);
6732 if (rc) {
6733 dev_err(&ctrl_info->pci_dev->dev,
Kevin Barnettd87d5472017-05-03 18:54:00 -05006734 "failed to allocate admin queues\n");
Kevin Barnett6c223762016-06-27 16:41:00 -05006735 return rc;
6736 }
6737
6738 rc = pqi_create_admin_queues(ctrl_info);
6739 if (rc) {
6740 dev_err(&ctrl_info->pci_dev->dev,
6741 "error creating admin queues\n");
6742 return rc;
6743 }
6744
6745 rc = pqi_report_device_capability(ctrl_info);
6746 if (rc) {
6747 dev_err(&ctrl_info->pci_dev->dev,
6748 "obtaining device capability failed\n");
6749 return rc;
6750 }
6751
6752 rc = pqi_validate_device_capability(ctrl_info);
6753 if (rc)
6754 return rc;
6755
6756 pqi_calculate_queue_resources(ctrl_info);
6757
6758 rc = pqi_enable_msix_interrupts(ctrl_info);
6759 if (rc)
6760 return rc;
6761
6762 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
6763 ctrl_info->max_msix_vectors =
6764 ctrl_info->num_msix_vectors_enabled;
6765 pqi_calculate_queue_resources(ctrl_info);
6766 }
6767
6768 rc = pqi_alloc_io_resources(ctrl_info);
6769 if (rc)
6770 return rc;
6771
6772 rc = pqi_alloc_operational_queues(ctrl_info);
Kevin Barnettd87d5472017-05-03 18:54:00 -05006773 if (rc) {
6774 dev_err(&ctrl_info->pci_dev->dev,
6775 "failed to allocate operational queues\n");
Kevin Barnett6c223762016-06-27 16:41:00 -05006776 return rc;
Kevin Barnettd87d5472017-05-03 18:54:00 -05006777 }
Kevin Barnett6c223762016-06-27 16:41:00 -05006778
6779 pqi_init_operational_queues(ctrl_info);
6780
6781 rc = pqi_request_irqs(ctrl_info);
6782 if (rc)
6783 return rc;
6784
Kevin Barnett6c223762016-06-27 16:41:00 -05006785 rc = pqi_create_queues(ctrl_info);
6786 if (rc)
6787 return rc;
6788
Kevin Barnett061ef062017-05-03 18:53:05 -05006789 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
6790
6791 ctrl_info->controller_online = true;
Kevin Barnettb212c252018-12-07 16:28:10 -06006792
6793 rc = pqi_process_config_table(ctrl_info);
6794 if (rc)
6795 return rc;
6796
Kevin Barnett061ef062017-05-03 18:53:05 -05006797 pqi_start_heartbeat_timer(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05006798
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05006799 rc = pqi_enable_events(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05006800 if (rc) {
6801 dev_err(&ctrl_info->pci_dev->dev,
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05006802 "error enabling events\n");
Kevin Barnett6c223762016-06-27 16:41:00 -05006803 return rc;
6804 }
6805
Kevin Barnett6c223762016-06-27 16:41:00 -05006806 /* Register with the SCSI subsystem. */
6807 rc = pqi_register_scsi(ctrl_info);
6808 if (rc)
6809 return rc;
6810
6811 rc = pqi_get_ctrl_firmware_version(ctrl_info);
6812 if (rc) {
6813 dev_err(&ctrl_info->pci_dev->dev,
6814 "error obtaining firmware version\n");
6815 return rc;
6816 }
6817
Dave Carroll171c2862018-12-07 16:28:35 -06006818 rc = pqi_set_diag_rescan(ctrl_info);
6819 if (rc) {
6820 dev_err(&ctrl_info->pci_dev->dev,
6821 "error enabling multi-lun rescan\n");
6822 return rc;
6823 }
6824
Kevin Barnett6c223762016-06-27 16:41:00 -05006825 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
6826 if (rc) {
6827 dev_err(&ctrl_info->pci_dev->dev,
6828 "error updating host wellness\n");
6829 return rc;
6830 }
6831
6832 pqi_schedule_update_time_worker(ctrl_info);
6833
6834 pqi_scan_scsi_devices(ctrl_info);
6835
6836 return 0;
6837}
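/*
 * Summary of the first-time initialization above: force/confirm SIS
 * mode, query SIS properties and PQI capabilities, size the I/O
 * resources, switch the controller into PQI mode, bring up the admin and
 * operational queues plus MSI-X, process the config table (firmware
 * features, heartbeat), enable events, register with the SCSI midlayer
 * and kick off the initial device scan.
 */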
6838
Kevin Barnett061ef062017-05-03 18:53:05 -05006839static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
6840{
6841 unsigned int i;
6842 struct pqi_admin_queues *admin_queues;
6843 struct pqi_event_queue *event_queue;
6844
6845 admin_queues = &ctrl_info->admin_queues;
6846 admin_queues->iq_pi_copy = 0;
6847 admin_queues->oq_ci_copy = 0;
Kevin Barnettdac12fb2018-06-18 13:23:00 -05006848 writel(0, admin_queues->oq_pi);
Kevin Barnett061ef062017-05-03 18:53:05 -05006849
6850 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6851 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
6852 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
6853 ctrl_info->queue_groups[i].oq_ci_copy = 0;
6854
Kevin Barnettdac12fb2018-06-18 13:23:00 -05006855 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]);
6856 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]);
6857 writel(0, ctrl_info->queue_groups[i].oq_pi);
Kevin Barnett061ef062017-05-03 18:53:05 -05006858 }
6859
6860 event_queue = &ctrl_info->event_queue;
Kevin Barnettdac12fb2018-06-18 13:23:00 -05006861 writel(0, event_queue->oq_pi);
Kevin Barnett061ef062017-05-03 18:53:05 -05006862 event_queue->oq_ci_copy = 0;
6863}
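/*
 * On resume the queue memory allocated at first-time init is reused, so
 * only the driver-side producer/consumer index copies and the indices
 * shared with the controller need to be reset to zero before the queues
 * are re-created.
 */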
6864
6865static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
6866{
6867 int rc;
6868
6869 rc = pqi_force_sis_mode(ctrl_info);
6870 if (rc)
6871 return rc;
6872
6873 /*
6874 * Wait until the controller is ready to start accepting SIS
6875 * commands.
6876 */
6877 rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
6878 if (rc)
6879 return rc;
6880
6881 /*
6882	 * If the call to sis_init_base_struct_addr() below succeeds,
6883	 * the controller will transition from legacy SIS mode
6884	 * into PQI mode.
6885 */
6886 rc = sis_init_base_struct_addr(ctrl_info);
6887 if (rc) {
6888 dev_err(&ctrl_info->pci_dev->dev,
6889 "error initializing PQI mode\n");
6890 return rc;
6891 }
6892
6893 /* Wait for the controller to complete the SIS -> PQI transition. */
6894 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
6895 if (rc) {
6896 dev_err(&ctrl_info->pci_dev->dev,
6897 "transition to PQI mode failed\n");
6898 return rc;
6899 }
6900
6901 /* From here on, we are running in PQI mode. */
6902 ctrl_info->pqi_mode_enabled = true;
6903 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
6904
6905 pqi_reinit_queues(ctrl_info);
6906
6907 rc = pqi_create_admin_queues(ctrl_info);
6908 if (rc) {
6909 dev_err(&ctrl_info->pci_dev->dev,
6910 "error creating admin queues\n");
6911 return rc;
6912 }
6913
6914 rc = pqi_create_queues(ctrl_info);
6915 if (rc)
6916 return rc;
6917
6918 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
6919
6920 ctrl_info->controller_online = true;
6921 pqi_start_heartbeat_timer(ctrl_info);
6922 pqi_ctrl_unblock_requests(ctrl_info);
6923
6924 rc = pqi_enable_events(ctrl_info);
6925 if (rc) {
6926 dev_err(&ctrl_info->pci_dev->dev,
Kevin Barnettd87d5472017-05-03 18:54:00 -05006927 "error enabling events\n");
Kevin Barnett061ef062017-05-03 18:53:05 -05006928 return rc;
6929 }
6930
Dave Carroll171c2862018-12-07 16:28:35 -06006931 rc = pqi_set_diag_rescan(ctrl_info);
6932 if (rc) {
6933 dev_err(&ctrl_info->pci_dev->dev,
6934 "error enabling multi-lun rescan\n");
6935 return rc;
6936 }
6937
Kevin Barnett061ef062017-05-03 18:53:05 -05006938 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
6939 if (rc) {
6940 dev_err(&ctrl_info->pci_dev->dev,
6941 "error updating host wellness\n");
6942 return rc;
6943 }
6944
6945 pqi_schedule_update_time_worker(ctrl_info);
6946
6947 pqi_scan_scsi_devices(ctrl_info);
6948
6949 return 0;
6950}
6951
Kevin Barnetta81ed5f32017-05-03 18:52:34 -05006952static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev,
6953 u16 timeout)
6954{
6955 return pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
6956 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
6957}
6958
Kevin Barnett6c223762016-06-27 16:41:00 -05006959static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
6960{
6961 int rc;
6962 u64 mask;
6963
6964 rc = pci_enable_device(ctrl_info->pci_dev);
6965 if (rc) {
6966 dev_err(&ctrl_info->pci_dev->dev,
6967 "failed to enable PCI device\n");
6968 return rc;
6969 }
6970
6971 if (sizeof(dma_addr_t) > 4)
6972 mask = DMA_BIT_MASK(64);
6973 else
6974 mask = DMA_BIT_MASK(32);
6975
6976 rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask);
6977 if (rc) {
6978 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
6979 goto disable_device;
6980 }
6981
6982 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
6983 if (rc) {
6984 dev_err(&ctrl_info->pci_dev->dev,
6985 "failed to obtain PCI resources\n");
6986 goto disable_device;
6987 }
6988
6989 ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
6990 ctrl_info->pci_dev, 0),
6991 sizeof(struct pqi_ctrl_registers));
6992 if (!ctrl_info->iomem_base) {
6993 dev_err(&ctrl_info->pci_dev->dev,
6994 "failed to map memory for controller registers\n");
6995 rc = -ENOMEM;
6996 goto release_regions;
6997 }
6998
Kevin Barnetta81ed5f32017-05-03 18:52:34 -05006999#define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6
7000
7001 /* Increase the PCIe completion timeout. */
7002 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
7003 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
7004 if (rc) {
7005 dev_err(&ctrl_info->pci_dev->dev,
7006 "failed to set PCIe completion timeout\n");
7007 goto release_regions;
7008 }
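	/*
	 * The value 0x6 written into the Device Control 2 completion timeout
	 * field selects the 65 ms to 210 ms range (per the PCIe spec
	 * encoding, as the macro name indicates); the helper above simply
	 * rewrites that field.
	 */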
7009
Kevin Barnett6c223762016-06-27 16:41:00 -05007010 /* Enable bus mastering. */
7011 pci_set_master(ctrl_info->pci_dev);
7012
Kevin Barnettcbe0c7b2017-05-03 18:53:48 -05007013 ctrl_info->registers = ctrl_info->iomem_base;
7014 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
7015
Kevin Barnett6c223762016-06-27 16:41:00 -05007016 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
7017
7018 return 0;
7019
7020release_regions:
7021 pci_release_regions(ctrl_info->pci_dev);
7022disable_device:
7023 pci_disable_device(ctrl_info->pci_dev);
7024
7025 return rc;
7026}
7027
7028static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
7029{
7030 iounmap(ctrl_info->iomem_base);
7031 pci_release_regions(ctrl_info->pci_dev);
Kevin Barnettcbe0c7b2017-05-03 18:53:48 -05007032 if (pci_is_enabled(ctrl_info->pci_dev))
7033 pci_disable_device(ctrl_info->pci_dev);
Kevin Barnett6c223762016-06-27 16:41:00 -05007034 pci_set_drvdata(ctrl_info->pci_dev, NULL);
7035}
7036
7037static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
7038{
7039 struct pqi_ctrl_info *ctrl_info;
7040
7041 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
7042 GFP_KERNEL, numa_node);
7043 if (!ctrl_info)
7044 return NULL;
7045
7046 mutex_init(&ctrl_info->scan_mutex);
Kevin Barnett7561a7e2017-05-03 18:52:58 -05007047 mutex_init(&ctrl_info->lun_reset_mutex);
Kevin Barnett6c223762016-06-27 16:41:00 -05007048
7049 INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
7050 spin_lock_init(&ctrl_info->scsi_device_list_lock);
7051
7052 INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
7053 atomic_set(&ctrl_info->num_interrupts, 0);
7054
7055 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
7056 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
7057
Kees Cook74a0f572017-10-11 16:27:10 -07007058 timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
Kevin Barnett5f310422017-05-03 18:54:55 -05007059 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
Kevin Barnett98f87662017-05-03 18:53:11 -05007060
Kevin Barnett6c223762016-06-27 16:41:00 -05007061 sema_init(&ctrl_info->sync_request_sem,
7062 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
Kevin Barnett7561a7e2017-05-03 18:52:58 -05007063 init_waitqueue_head(&ctrl_info->block_requests_wait);
Kevin Barnett6c223762016-06-27 16:41:00 -05007064
Kevin Barnett376fb882017-05-03 18:54:43 -05007065 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
7066 spin_lock_init(&ctrl_info->raid_bypass_retry_list_lock);
7067 INIT_WORK(&ctrl_info->raid_bypass_retry_work,
7068 pqi_raid_bypass_retry_worker);
7069
Kevin Barnett6c223762016-06-27 16:41:00 -05007070 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
Kevin Barnett061ef062017-05-03 18:53:05 -05007071 ctrl_info->irq_mode = IRQ_MODE_NONE;
Kevin Barnett6c223762016-06-27 16:41:00 -05007072 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
7073
7074 return ctrl_info;
7075}
7076
7077static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
7078{
7079 kfree(ctrl_info);
7080}
7081
7082static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
7083{
Kevin Barnett98bf0612017-05-03 18:52:28 -05007084 pqi_free_irqs(ctrl_info);
7085 pqi_disable_msix_interrupts(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05007086}
7087
7088static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
7089{
7090 pqi_stop_heartbeat_timer(ctrl_info);
7091 pqi_free_interrupts(ctrl_info);
7092 if (ctrl_info->queue_memory_base)
7093 dma_free_coherent(&ctrl_info->pci_dev->dev,
7094 ctrl_info->queue_memory_length,
7095 ctrl_info->queue_memory_base,
7096 ctrl_info->queue_memory_base_dma_handle);
7097 if (ctrl_info->admin_queue_memory_base)
7098 dma_free_coherent(&ctrl_info->pci_dev->dev,
7099 ctrl_info->admin_queue_memory_length,
7100 ctrl_info->admin_queue_memory_base,
7101 ctrl_info->admin_queue_memory_base_dma_handle);
7102 pqi_free_all_io_requests(ctrl_info);
7103 if (ctrl_info->error_buffer)
7104 dma_free_coherent(&ctrl_info->pci_dev->dev,
7105 ctrl_info->error_buffer_length,
7106 ctrl_info->error_buffer,
7107 ctrl_info->error_buffer_dma_handle);
7108 if (ctrl_info->iomem_base)
7109 pqi_cleanup_pci_init(ctrl_info);
7110 pqi_free_ctrl_info(ctrl_info);
7111}
7112
7113static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
7114{
Kevin Barnett061ef062017-05-03 18:53:05 -05007115 pqi_cancel_rescan_worker(ctrl_info);
7116 pqi_cancel_update_time_worker(ctrl_info);
Kevin Barnette57a1f92016-08-31 14:54:47 -05007117 pqi_remove_all_scsi_devices(ctrl_info);
7118 pqi_unregister_scsi(ctrl_info);
Kevin Barnett162d7752017-05-03 18:52:46 -05007119 if (ctrl_info->pqi_mode_enabled)
7120 pqi_revert_to_sis_mode(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05007121 pqi_free_ctrl_resources(ctrl_info);
7122}
7123
Kevin Barnett3c509762017-05-03 18:54:37 -05007124static void pqi_perform_lockup_action(void)
7125{
7126 switch (pqi_lockup_action) {
7127 case PANIC:
7128 panic("FATAL: Smart Family Controller lockup detected");
7129 break;
7130 case REBOOT:
7131 emergency_restart();
7132 break;
7133 case NONE:
7134 default:
7135 break;
7136 }
7137}
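/*
 * The response to a detected firmware lockup is policy: it is selected
 * through the module parameter backing pqi_lockup_action_param (parsed
 * by pqi_process_lockup_action_param() below) and is one of none,
 * reboot or panic.
 */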
7138
Kevin Barnett5f310422017-05-03 18:54:55 -05007139static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
7140 .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
7141 .status = SAM_STAT_CHECK_CONDITION,
7142};
7143
7144static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
Kevin Barnett376fb882017-05-03 18:54:43 -05007145{
7146 unsigned int i;
Kevin Barnett376fb882017-05-03 18:54:43 -05007147 struct pqi_io_request *io_request;
Kevin Barnett376fb882017-05-03 18:54:43 -05007148 struct scsi_cmnd *scmd;
7149
Kevin Barnett5f310422017-05-03 18:54:55 -05007150 for (i = 0; i < ctrl_info->max_io_slots; i++) {
7151 io_request = &ctrl_info->io_request_pool[i];
7152 if (atomic_read(&io_request->refcount) == 0)
7153 continue;
Kevin Barnett376fb882017-05-03 18:54:43 -05007154
Kevin Barnett5f310422017-05-03 18:54:55 -05007155 scmd = io_request->scmd;
7156 if (scmd) {
7157 set_host_byte(scmd, DID_NO_CONNECT);
7158 } else {
7159 io_request->status = -ENXIO;
7160 io_request->error_info =
7161 &pqi_ctrl_offline_raid_error_info;
Kevin Barnett376fb882017-05-03 18:54:43 -05007162 }
Kevin Barnett5f310422017-05-03 18:54:55 -05007163
7164 io_request->io_complete_callback(io_request,
7165 io_request->context);
Kevin Barnett376fb882017-05-03 18:54:43 -05007166 }
7167}
7168
Kevin Barnett5f310422017-05-03 18:54:55 -05007169static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
Kevin Barnett376fb882017-05-03 18:54:43 -05007170{
Kevin Barnett5f310422017-05-03 18:54:55 -05007171 pqi_perform_lockup_action();
7172 pqi_stop_heartbeat_timer(ctrl_info);
7173 pqi_free_interrupts(ctrl_info);
7174 pqi_cancel_rescan_worker(ctrl_info);
7175 pqi_cancel_update_time_worker(ctrl_info);
7176 pqi_ctrl_wait_until_quiesced(ctrl_info);
7177 pqi_fail_all_outstanding_requests(ctrl_info);
7178 pqi_clear_all_queued_raid_bypass_retries(ctrl_info);
7179 pqi_ctrl_unblock_requests(ctrl_info);
7180}
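/*
 * The offline teardown runs from a work item scheduled by
 * pqi_take_ctrl_offline() rather than in the context that detected the
 * failure.  Outstanding SCSI commands are completed with DID_NO_CONNECT;
 * internal requests get -ENXIO plus a synthetic CHECK CONDITION error
 * info block, after which request submission is unblocked.
 */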
7181
7182static void pqi_ctrl_offline_worker(struct work_struct *work)
7183{
7184 struct pqi_ctrl_info *ctrl_info;
7185
7186 ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
7187 pqi_take_ctrl_offline_deferred(ctrl_info);
Kevin Barnett376fb882017-05-03 18:54:43 -05007188}
7189
7190static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
7191{
Kevin Barnett5f310422017-05-03 18:54:55 -05007192 if (!ctrl_info->controller_online)
7193 return;
7194
Kevin Barnett376fb882017-05-03 18:54:43 -05007195 ctrl_info->controller_online = false;
Kevin Barnett5f310422017-05-03 18:54:55 -05007196 ctrl_info->pqi_mode_enabled = false;
7197 pqi_ctrl_block_requests(ctrl_info);
Kevin Barnett5a259e32017-05-03 18:55:43 -05007198 if (!pqi_disable_ctrl_shutdown)
7199 sis_shutdown_ctrl(ctrl_info);
Kevin Barnett376fb882017-05-03 18:54:43 -05007200 pci_disable_device(ctrl_info->pci_dev);
7201 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
Kevin Barnett5f310422017-05-03 18:54:55 -05007202 schedule_work(&ctrl_info->ctrl_offline_work);
Kevin Barnett376fb882017-05-03 18:54:43 -05007203}
7204
Kevin Barnettd91d7822017-05-03 18:53:30 -05007205static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
Kevin Barnett6c223762016-06-27 16:41:00 -05007206 const struct pci_device_id *id)
7207{
7208 char *ctrl_description;
7209
Kevin Barnett37b36842017-05-03 18:55:01 -05007210 if (id->driver_data)
Kevin Barnett6c223762016-06-27 16:41:00 -05007211 ctrl_description = (char *)id->driver_data;
Kevin Barnett37b36842017-05-03 18:55:01 -05007212 else
7213 ctrl_description = "Microsemi Smart Family Controller";
Kevin Barnett6c223762016-06-27 16:41:00 -05007214
Kevin Barnettd91d7822017-05-03 18:53:30 -05007215 dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
Kevin Barnett6c223762016-06-27 16:41:00 -05007216}
7217
Kevin Barnettd91d7822017-05-03 18:53:30 -05007218static int pqi_pci_probe(struct pci_dev *pci_dev,
7219 const struct pci_device_id *id)
Kevin Barnett6c223762016-06-27 16:41:00 -05007220{
7221 int rc;
Sagar Biradar62dc51f2018-12-07 16:29:12 -06007222 int node, cp_node;
Kevin Barnett6c223762016-06-27 16:41:00 -05007223 struct pqi_ctrl_info *ctrl_info;
7224
Kevin Barnettd91d7822017-05-03 18:53:30 -05007225 pqi_print_ctrl_info(pci_dev, id);
Kevin Barnett6c223762016-06-27 16:41:00 -05007226
7227 if (pqi_disable_device_id_wildcards &&
7228 id->subvendor == PCI_ANY_ID &&
7229 id->subdevice == PCI_ANY_ID) {
Kevin Barnettd91d7822017-05-03 18:53:30 -05007230 dev_warn(&pci_dev->dev,
Kevin Barnett6c223762016-06-27 16:41:00 -05007231 "controller not probed because device ID wildcards are disabled\n");
7232 return -ENODEV;
7233 }
7234
7235 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
Kevin Barnettd91d7822017-05-03 18:53:30 -05007236 dev_warn(&pci_dev->dev,
Kevin Barnett6c223762016-06-27 16:41:00 -05007237 "controller device ID matched using wildcards\n");
7238
Kevin Barnettd91d7822017-05-03 18:53:30 -05007239 node = dev_to_node(&pci_dev->dev);
Sagar Biradar62dc51f2018-12-07 16:29:12 -06007240 if (node == NUMA_NO_NODE) {
7241 cp_node = cpu_to_node(0);
7242 if (cp_node == NUMA_NO_NODE)
7243 cp_node = 0;
7244 set_dev_node(&pci_dev->dev, cp_node);
7245 }
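	/*
	 * If the device has no NUMA node assigned, point it at the node of
	 * CPU 0 (or node 0 as a last resort) via set_dev_node().  Note that
	 * the local "node" variable is not updated here, so the controller
	 * info block below is still allocated with the original value.
	 */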
Kevin Barnett6c223762016-06-27 16:41:00 -05007246
7247 ctrl_info = pqi_alloc_ctrl_info(node);
7248 if (!ctrl_info) {
Kevin Barnettd91d7822017-05-03 18:53:30 -05007249 dev_err(&pci_dev->dev,
Kevin Barnett6c223762016-06-27 16:41:00 -05007250 "failed to allocate controller info block\n");
7251 return -ENOMEM;
7252 }
7253
Kevin Barnettd91d7822017-05-03 18:53:30 -05007254 ctrl_info->pci_dev = pci_dev;
Kevin Barnett6c223762016-06-27 16:41:00 -05007255
7256 rc = pqi_pci_init(ctrl_info);
7257 if (rc)
7258 goto error;
7259
7260 rc = pqi_ctrl_init(ctrl_info);
7261 if (rc)
7262 goto error;
7263
7264 return 0;
7265
7266error:
7267 pqi_remove_ctrl(ctrl_info);
7268
7269 return rc;
7270}
7271
Kevin Barnettd91d7822017-05-03 18:53:30 -05007272static void pqi_pci_remove(struct pci_dev *pci_dev)
Kevin Barnett6c223762016-06-27 16:41:00 -05007273{
7274 struct pqi_ctrl_info *ctrl_info;
7275
Kevin Barnettd91d7822017-05-03 18:53:30 -05007276 ctrl_info = pci_get_drvdata(pci_dev);
Kevin Barnett6c223762016-06-27 16:41:00 -05007277 if (!ctrl_info)
7278 return;
7279
Mahesh Rajashekhara1e467312018-12-07 16:29:24 -06007280 ctrl_info->in_shutdown = true;
7281
Kevin Barnett6c223762016-06-27 16:41:00 -05007282 pqi_remove_ctrl(ctrl_info);
7283}
7284
Kevin Barnettd91d7822017-05-03 18:53:30 -05007285static void pqi_shutdown(struct pci_dev *pci_dev)
Kevin Barnett6c223762016-06-27 16:41:00 -05007286{
7287 int rc;
7288 struct pqi_ctrl_info *ctrl_info;
7289
Kevin Barnettd91d7822017-05-03 18:53:30 -05007290 ctrl_info = pci_get_drvdata(pci_dev);
Kevin Barnett6c223762016-06-27 16:41:00 -05007291 if (!ctrl_info)
7292 goto error;
7293
7294 /*
7295 * Write all data in the controller's battery-backed cache to
7296 * storage.
7297 */
Kevin Barnett58322fe2017-08-10 13:46:45 -05007298 rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
Kevin Barnettb6d47812017-08-10 13:47:03 -05007299 pqi_reset(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05007300 if (rc == 0)
7301 return;
7302
7303error:
Kevin Barnettd91d7822017-05-03 18:53:30 -05007304 dev_warn(&pci_dev->dev,
Kevin Barnett6c223762016-06-27 16:41:00 -05007305 "unable to flush controller cache\n");
7306}
7307
Kevin Barnett3c509762017-05-03 18:54:37 -05007308static void pqi_process_lockup_action_param(void)
7309{
7310 unsigned int i;
7311
7312 if (!pqi_lockup_action_param)
7313 return;
7314
7315 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
7316 if (strcmp(pqi_lockup_action_param,
7317 pqi_lockup_actions[i].name) == 0) {
7318 pqi_lockup_action = pqi_lockup_actions[i].action;
7319 return;
7320 }
7321 }
7322
7323 pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
7324 DRIVER_NAME_SHORT, pqi_lockup_action_param);
7325}
7326
7327static void pqi_process_module_params(void)
7328{
7329 pqi_process_lockup_action_param();
7330}
7331
Arnd Bergmann5c146682017-05-18 10:32:18 +02007332static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state)
Kevin Barnett061ef062017-05-03 18:53:05 -05007333{
7334 struct pqi_ctrl_info *ctrl_info;
7335
7336 ctrl_info = pci_get_drvdata(pci_dev);
7337
7338 pqi_disable_events(ctrl_info);
7339 pqi_cancel_update_time_worker(ctrl_info);
7340 pqi_cancel_rescan_worker(ctrl_info);
7341 pqi_wait_until_scan_finished(ctrl_info);
7342 pqi_wait_until_lun_reset_finished(ctrl_info);
Kevin Barnett58322fe2017-08-10 13:46:45 -05007343 pqi_flush_cache(ctrl_info, SUSPEND);
Kevin Barnett061ef062017-05-03 18:53:05 -05007344 pqi_ctrl_block_requests(ctrl_info);
7345 pqi_ctrl_wait_until_quiesced(ctrl_info);
7346 pqi_wait_until_inbound_queues_empty(ctrl_info);
7347 pqi_ctrl_wait_for_pending_io(ctrl_info);
7348 pqi_stop_heartbeat_timer(ctrl_info);
7349
7350 if (state.event == PM_EVENT_FREEZE)
7351 return 0;
7352
7353 pci_save_state(pci_dev);
7354 pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
7355
7356 ctrl_info->controller_online = false;
7357 ctrl_info->pqi_mode_enabled = false;
7358
7359 return 0;
7360}
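/*
 * Suspend quiesces the controller in stages: stop event delivery and the
 * background workers, wait for scans and LUN resets to finish, flush the
 * write cache, block new requests and drain everything in flight, then
 * stop the heartbeat timer.  For PM_EVENT_FREEZE the device is left
 * powered; otherwise PCI state is saved and the device is placed in the
 * power state chosen for this suspend event.
 */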
7361
Arnd Bergmann5c146682017-05-18 10:32:18 +02007362static __maybe_unused int pqi_resume(struct pci_dev *pci_dev)
Kevin Barnett061ef062017-05-03 18:53:05 -05007363{
7364 int rc;
7365 struct pqi_ctrl_info *ctrl_info;
7366
7367 ctrl_info = pci_get_drvdata(pci_dev);
7368
7369 if (pci_dev->current_state != PCI_D0) {
7370 ctrl_info->max_hw_queue_index = 0;
7371 pqi_free_interrupts(ctrl_info);
7372 pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);
7373 rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler,
7374 IRQF_SHARED, DRIVER_NAME_SHORT,
7375 &ctrl_info->queue_groups[0]);
7376 if (rc) {
7377 dev_err(&ctrl_info->pci_dev->dev,
7378 "irq %u init failed with error %d\n",
7379 pci_dev->irq, rc);
7380 return rc;
7381 }
7382 pqi_start_heartbeat_timer(ctrl_info);
7383 pqi_ctrl_unblock_requests(ctrl_info);
7384 return 0;
7385 }
7386
7387 pci_set_power_state(pci_dev, PCI_D0);
7388 pci_restore_state(pci_dev);
7389
7390 return pqi_ctrl_init_resume(ctrl_info);
7391}
7392
Kevin Barnett6c223762016-06-27 16:41:00 -05007393/* Define the PCI IDs for the controllers that we support. */
7394static const struct pci_device_id pqi_pci_id_table[] = {
7395 {
7396 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnettb0f94082018-03-05 09:01:00 -06007397 0x105b, 0x1211)
7398 },
7399 {
7400 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7401 0x105b, 0x1321)
7402 },
7403 {
7404 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett7eddabf2017-05-03 18:53:54 -05007405 0x152d, 0x8a22)
7406 },
7407 {
7408 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7409 0x152d, 0x8a23)
7410 },
7411 {
7412 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7413 0x152d, 0x8a24)
7414 },
7415 {
7416 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7417 0x152d, 0x8a36)
7418 },
7419 {
7420 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7421 0x152d, 0x8a37)
7422 },
7423 {
7424 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnettb0f94082018-03-05 09:01:00 -06007425 0x193d, 0x8460)
7426 },
7427 {
7428 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7429 0x193d, 0x8461)
7430 },
7431 {
7432 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Murthy Bhat84a77fe2018-12-07 16:28:53 -06007433 0x193d, 0xc460)
7434 },
7435 {
7436 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7437 0x193d, 0xc461)
7438 },
7439 {
7440 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnettb0f94082018-03-05 09:01:00 -06007441 0x193d, 0xf460)
7442 },
7443 {
7444 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7445 0x193d, 0xf461)
7446 },
7447 {
7448 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7449 0x1bd4, 0x0045)
7450 },
7451 {
7452 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7453 0x1bd4, 0x0046)
7454 },
7455 {
7456 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7457 0x1bd4, 0x0047)
7458 },
7459 {
7460 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7461 0x1bd4, 0x0048)
7462 },
7463 {
7464 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett9f8d05f2018-06-18 13:22:54 -05007465 0x1bd4, 0x004a)
7466 },
7467 {
7468 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7469 0x1bd4, 0x004b)
7470 },
7471 {
7472 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7473 0x1bd4, 0x004c)
7474 },
7475 {
7476 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Ajish Koshyc1b10472018-12-07 16:29:18 -06007477 0x19e5, 0xd227)
7478 },
7479 {
7480 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7481 0x19e5, 0xd228)
7482 },
7483 {
7484 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7485 0x19e5, 0xd229)
7486 },
7487 {
7488 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7489 0x19e5, 0xd22a)
7490 },
7491 {
7492 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7493 0x19e5, 0xd22b)
7494 },
7495 {
7496 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7497 0x19e5, 0xd22c)
7498 },
7499 {
7500 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett6c223762016-06-27 16:41:00 -05007501 PCI_VENDOR_ID_ADAPTEC2, 0x0110)
7502 },
7503 {
7504 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett55790062017-08-10 13:47:09 -05007505 PCI_VENDOR_ID_ADAPTEC2, 0x0608)
Kevin Barnett6c223762016-06-27 16:41:00 -05007506 },
7507 {
7508 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7509 PCI_VENDOR_ID_ADAPTEC2, 0x0800)
7510 },
7511 {
7512 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7513 PCI_VENDOR_ID_ADAPTEC2, 0x0801)
7514 },
7515 {
7516 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7517 PCI_VENDOR_ID_ADAPTEC2, 0x0802)
7518 },
7519 {
7520 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7521 PCI_VENDOR_ID_ADAPTEC2, 0x0803)
7522 },
7523 {
7524 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7525 PCI_VENDOR_ID_ADAPTEC2, 0x0804)
7526 },
7527 {
7528 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7529 PCI_VENDOR_ID_ADAPTEC2, 0x0805)
7530 },
7531 {
7532 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett7eddabf2017-05-03 18:53:54 -05007533 PCI_VENDOR_ID_ADAPTEC2, 0x0806)
7534 },
7535 {
7536 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett55790062017-08-10 13:47:09 -05007537 PCI_VENDOR_ID_ADAPTEC2, 0x0807)
7538 },
7539 {
7540 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett6c223762016-06-27 16:41:00 -05007541 PCI_VENDOR_ID_ADAPTEC2, 0x0900)
7542 },
7543 {
7544 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7545 PCI_VENDOR_ID_ADAPTEC2, 0x0901)
7546 },
7547 {
7548 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7549 PCI_VENDOR_ID_ADAPTEC2, 0x0902)
7550 },
7551 {
7552 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7553 PCI_VENDOR_ID_ADAPTEC2, 0x0903)
7554 },
7555 {
7556 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7557 PCI_VENDOR_ID_ADAPTEC2, 0x0904)
7558 },
7559 {
7560 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7561 PCI_VENDOR_ID_ADAPTEC2, 0x0905)
7562 },
7563 {
7564 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7565 PCI_VENDOR_ID_ADAPTEC2, 0x0906)
7566 },
7567 {
7568 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett7eddabf2017-05-03 18:53:54 -05007569 PCI_VENDOR_ID_ADAPTEC2, 0x0907)
7570 },
7571 {
7572 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7573 PCI_VENDOR_ID_ADAPTEC2, 0x0908)
7574 },
7575 {
7576 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett55790062017-08-10 13:47:09 -05007577 PCI_VENDOR_ID_ADAPTEC2, 0x090a)
7578 },
7579 {
7580 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett7eddabf2017-05-03 18:53:54 -05007581 PCI_VENDOR_ID_ADAPTEC2, 0x1200)
7582 },
7583 {
7584 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7585 PCI_VENDOR_ID_ADAPTEC2, 0x1201)
7586 },
7587 {
7588 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7589 PCI_VENDOR_ID_ADAPTEC2, 0x1202)
7590 },
7591 {
7592 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7593 PCI_VENDOR_ID_ADAPTEC2, 0x1280)
7594 },
7595 {
7596 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7597 PCI_VENDOR_ID_ADAPTEC2, 0x1281)
7598 },
7599 {
7600 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnettb0f94082018-03-05 09:01:00 -06007601 PCI_VENDOR_ID_ADAPTEC2, 0x1282)
7602 },
7603 {
7604 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett7eddabf2017-05-03 18:53:54 -05007605 PCI_VENDOR_ID_ADAPTEC2, 0x1300)
7606 },
7607 {
7608 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7609 PCI_VENDOR_ID_ADAPTEC2, 0x1301)
7610 },
7611 {
7612 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnettbd809e82017-09-27 16:29:59 -05007613 PCI_VENDOR_ID_ADAPTEC2, 0x1302)
7614 },
7615 {
7616 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7617 PCI_VENDOR_ID_ADAPTEC2, 0x1303)
7618 },
7619 {
7620 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett7eddabf2017-05-03 18:53:54 -05007621 PCI_VENDOR_ID_ADAPTEC2, 0x1380)
7622 },
7623 {
7624 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett9f8d05f2018-06-18 13:22:54 -05007625 PCI_VENDOR_ID_ADVANTECH, 0x8312)
7626 },
7627 {
7628 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett55790062017-08-10 13:47:09 -05007629 PCI_VENDOR_ID_DELL, 0x1fe0)
7630 },
7631 {
7632 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett7eddabf2017-05-03 18:53:54 -05007633 PCI_VENDOR_ID_HP, 0x0600)
7634 },
7635 {
7636 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7637 PCI_VENDOR_ID_HP, 0x0601)
7638 },
7639 {
7640 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7641 PCI_VENDOR_ID_HP, 0x0602)
7642 },
7643 {
7644 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7645 PCI_VENDOR_ID_HP, 0x0603)
7646 },
7647 {
7648 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett55790062017-08-10 13:47:09 -05007649 PCI_VENDOR_ID_HP, 0x0609)
Kevin Barnett7eddabf2017-05-03 18:53:54 -05007650 },
7651 {
7652 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7653 PCI_VENDOR_ID_HP, 0x0650)
7654 },
7655 {
7656 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7657 PCI_VENDOR_ID_HP, 0x0651)
7658 },
7659 {
7660 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7661 PCI_VENDOR_ID_HP, 0x0652)
7662 },
7663 {
7664 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7665 PCI_VENDOR_ID_HP, 0x0653)
7666 },
7667 {
7668 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7669 PCI_VENDOR_ID_HP, 0x0654)
7670 },
7671 {
7672 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7673 PCI_VENDOR_ID_HP, 0x0655)
7674 },
7675 {
7676 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett7eddabf2017-05-03 18:53:54 -05007677 PCI_VENDOR_ID_HP, 0x0700)
7678 },
7679 {
7680 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7681 PCI_VENDOR_ID_HP, 0x0701)
7682 },
7683 {
7684 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett6c223762016-06-27 16:41:00 -05007685 PCI_VENDOR_ID_HP, 0x1001)
7686 },
7687 {
7688 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7689 PCI_VENDOR_ID_HP, 0x1100)
7690 },
7691 {
7692 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7693 PCI_VENDOR_ID_HP, 0x1101)
7694 },
7695 {
7696 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett6c223762016-06-27 16:41:00 -05007697 PCI_ANY_ID, PCI_ANY_ID)
7698 },
7699 { 0 }
7700};
7701
7702MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
7703
7704static struct pci_driver pqi_pci_driver = {
7705 .name = DRIVER_NAME_SHORT,
7706 .id_table = pqi_pci_id_table,
7707 .probe = pqi_pci_probe,
7708 .remove = pqi_pci_remove,
7709 .shutdown = pqi_shutdown,
Kevin Barnett061ef062017-05-03 18:53:05 -05007710#if defined(CONFIG_PM)
7711 .suspend = pqi_suspend,
7712 .resume = pqi_resume,
7713#endif
Kevin Barnett6c223762016-06-27 16:41:00 -05007714};
7715
7716static int __init pqi_init(void)
7717{
7718 int rc;
7719
7720 pr_info(DRIVER_NAME "\n");
7721
7722 pqi_sas_transport_template =
7723 sas_attach_transport(&pqi_sas_transport_functions);
7724 if (!pqi_sas_transport_template)
7725 return -ENODEV;
7726
Kevin Barnett3c509762017-05-03 18:54:37 -05007727 pqi_process_module_params();
7728
Kevin Barnett6c223762016-06-27 16:41:00 -05007729 rc = pci_register_driver(&pqi_pci_driver);
7730 if (rc)
7731 sas_release_transport(pqi_sas_transport_template);
7732
7733 return rc;
7734}
7735
7736static void __exit pqi_cleanup(void)
7737{
7738 pci_unregister_driver(&pqi_pci_driver);
7739 sas_release_transport(pqi_sas_transport_template);
7740}
7741
7742module_init(pqi_init);
7743module_exit(pqi_cleanup);
7744
7745static void __attribute__((unused)) verify_structures(void)
7746{
7747 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
7748 sis_host_to_ctrl_doorbell) != 0x20);
7749 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
7750 sis_interrupt_mask) != 0x34);
7751 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
7752 sis_ctrl_to_host_doorbell) != 0x9c);
7753 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
7754 sis_ctrl_to_host_doorbell_clear) != 0xa0);
7755 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
Kevin Barnettff6abb72016-08-31 14:54:41 -05007756 sis_driver_scratch) != 0xb0);
7757 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
Kevin Barnett6c223762016-06-27 16:41:00 -05007758 sis_firmware_status) != 0xbc);
7759 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
7760 sis_mailbox) != 0x1000);
7761 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
7762 pqi_registers) != 0x4000);
7763
7764 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
7765 iu_type) != 0x0);
7766 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
7767 iu_length) != 0x2);
7768 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
7769 response_queue_id) != 0x4);
7770 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
7771 work_area) != 0x6);
7772 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
7773
7774 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
7775 status) != 0x0);
7776 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
7777 service_response) != 0x1);
7778 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
7779 data_present) != 0x2);
7780 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
7781 reserved) != 0x3);
7782 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
7783 residual_count) != 0x4);
7784 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
7785 data_length) != 0x8);
7786 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
7787 reserved1) != 0xa);
7788 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
7789 data) != 0xc);
7790 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
7791
7792 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7793 data_in_result) != 0x0);
7794 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7795 data_out_result) != 0x1);
7796 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7797 reserved) != 0x2);
7798 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7799 status) != 0x5);
7800 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7801 status_qualifier) != 0x6);
7802 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7803 sense_data_length) != 0x8);
7804 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7805 response_data_length) != 0xa);
7806 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7807 data_in_transferred) != 0xc);
7808 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7809 data_out_transferred) != 0x10);
7810 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7811 data) != 0x14);
7812 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
7813
7814 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7815 signature) != 0x0);
7816 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7817 function_and_status_code) != 0x8);
7818 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7819 max_admin_iq_elements) != 0x10);
7820 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7821 max_admin_oq_elements) != 0x11);
7822 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7823 admin_iq_element_length) != 0x12);
7824 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7825 admin_oq_element_length) != 0x13);
7826 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7827 max_reset_timeout) != 0x14);
7828 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7829 legacy_intx_status) != 0x18);
7830 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7831 legacy_intx_mask_set) != 0x1c);
7832 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7833 legacy_intx_mask_clear) != 0x20);
7834 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7835 device_status) != 0x40);
7836 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7837 admin_iq_pi_offset) != 0x48);
7838 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7839 admin_oq_ci_offset) != 0x50);
7840 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7841 admin_iq_element_array_addr) != 0x58);
7842 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7843 admin_oq_element_array_addr) != 0x60);
7844 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7845 admin_iq_ci_addr) != 0x68);
7846 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7847 admin_oq_pi_addr) != 0x70);
7848 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7849 admin_iq_num_elements) != 0x78);
7850 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7851 admin_oq_num_elements) != 0x79);
7852 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7853 admin_queue_int_msg_num) != 0x7a);
7854 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7855 device_error) != 0x80);
7856 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7857 error_details) != 0x88);
7858 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7859 device_reset) != 0x90);
7860 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7861 power_action) != 0x94);
7862 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
7863
7864 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7865 header.iu_type) != 0);
7866 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7867 header.iu_length) != 2);
7868 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7869 header.work_area) != 6);
7870 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7871 request_id) != 8);
7872 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7873 function_code) != 10);
7874 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7875 data.report_device_capability.buffer_length) != 44);
7876 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7877 data.report_device_capability.sg_descriptor) != 48);
7878 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7879 data.create_operational_iq.queue_id) != 12);
7880 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7881 data.create_operational_iq.element_array_addr) != 16);
7882 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7883 data.create_operational_iq.ci_addr) != 24);
7884 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7885 data.create_operational_iq.num_elements) != 32);
7886 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7887 data.create_operational_iq.element_length) != 34);
7888 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7889 data.create_operational_iq.queue_protocol) != 36);
7890 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7891 data.create_operational_oq.queue_id) != 12);
7892 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7893 data.create_operational_oq.element_array_addr) != 16);
7894 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7895 data.create_operational_oq.pi_addr) != 24);
7896 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7897 data.create_operational_oq.num_elements) != 32);
7898 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7899 data.create_operational_oq.element_length) != 34);
7900 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7901 data.create_operational_oq.queue_protocol) != 36);
7902 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7903 data.create_operational_oq.int_msg_num) != 40);
7904 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7905 data.create_operational_oq.coalescing_count) != 42);
7906 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7907 data.create_operational_oq.min_coalescing_time) != 44);
7908 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7909 data.create_operational_oq.max_coalescing_time) != 48);
7910 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7911 data.delete_operational_queue.queue_id) != 12);
7912 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
7913 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
7914 data.create_operational_iq) != 64 - 11);
7915 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
7916 data.create_operational_oq) != 64 - 11);
7917 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
7918 data.delete_operational_queue) != 64 - 11);
7919
7920 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7921 header.iu_type) != 0);
7922 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7923 header.iu_length) != 2);
7924 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7925 header.work_area) != 6);
7926 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7927 request_id) != 8);
7928 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7929 function_code) != 10);
7930 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7931 status) != 11);
7932 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7933 data.create_operational_iq.status_descriptor) != 12);
7934 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7935 data.create_operational_iq.iq_pi_offset) != 16);
7936 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7937 data.create_operational_oq.status_descriptor) != 12);
7938 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7939 data.create_operational_oq.oq_ci_offset) != 16);
7940 BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
7941
7942 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7943 header.iu_type) != 0);
7944 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7945 header.iu_length) != 2);
7946 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7947 header.response_queue_id) != 4);
7948 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7949 header.work_area) != 6);
7950 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7951 request_id) != 8);
7952 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7953 nexus_id) != 10);
7954 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7955 buffer_length) != 12);
7956 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7957 lun_number) != 16);
7958 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7959 protocol_specific) != 24);
7960 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7961 error_index) != 27);
7962 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7963 cdb) != 32);
7964 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7965 sg_descriptors) != 64);
7966 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
7967 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
7968
7969 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7970 header.iu_type) != 0);
7971 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7972 header.iu_length) != 2);
7973 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7974 header.response_queue_id) != 4);
7975 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7976 header.work_area) != 6);
7977 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7978 request_id) != 8);
7979 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7980 nexus_id) != 12);
7981 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7982 buffer_length) != 16);
7983 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7984 data_encryption_key_index) != 22);
7985 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7986 encrypt_tweak_lower) != 24);
7987 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7988 encrypt_tweak_upper) != 28);
7989 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7990 cdb) != 32);
7991 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7992 error_index) != 48);
7993 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7994 num_sg_descriptors) != 50);
7995 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7996 cdb_length) != 51);
7997 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7998 lun_number) != 52);
7999 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8000 sg_descriptors) != 64);
8001 BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
8002 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
8003
8004 BUILD_BUG_ON(offsetof(struct pqi_io_response,
8005 header.iu_type) != 0);
8006 BUILD_BUG_ON(offsetof(struct pqi_io_response,
8007 header.iu_length) != 2);
8008 BUILD_BUG_ON(offsetof(struct pqi_io_response,
8009 request_id) != 8);
8010 BUILD_BUG_ON(offsetof(struct pqi_io_response,
8011 error_index) != 10);
8012
8013 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8014 header.iu_type) != 0);
8015 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8016 header.iu_length) != 2);
8017 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8018 header.response_queue_id) != 4);
8019 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8020 request_id) != 8);
8021 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8022 data.report_event_configuration.buffer_length) != 12);
8023 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8024 data.report_event_configuration.sg_descriptors) != 16);
8025 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8026 data.set_event_configuration.global_event_oq_id) != 10);
8027 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8028 data.set_event_configuration.buffer_length) != 12);
8029 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8030 data.set_event_configuration.sg_descriptors) != 16);
8031
8032 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
8033 max_inbound_iu_length) != 6);
8034 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
8035 max_outbound_iu_length) != 14);
8036 BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
8037
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		data_length) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iq_arbitration_priority_support_bitmask) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_a) != 9);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_b) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_c) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_inbound_queues) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_iq) != 18);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_iq_element_length) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_iq_element_length) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_outbound_queues) != 30);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_oq) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		intr_coalescing_time_granularity) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_oq_element_length) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_oq_element_length) != 38);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iu_layer_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);

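	/* event descriptor layout */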
	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		event_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		oq_id) != 2);
	BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);

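	/* event configuration layout */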
	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		num_event_descriptors) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		descriptors) != 4);

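	/* pqi_supported_event_types[] must match PQI_NUM_SUPPORTED_EVENTS */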
	BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
		ARRAY_SIZE(pqi_supported_event_types));

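	/* event response layout */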
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		additional_event_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		data) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);

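	/* event acknowledge request layout */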
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		additional_event_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);

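	/* task management request layout */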
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		outbound_queue_id_to_manage) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id_to_manage) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		task_management_function) != 30);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);

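	/* task management response layout */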
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		additional_response_info) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		response_code) != 15);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);

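	/* BMIC identify controller buffer layout */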
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configured_logical_drive_count) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configuration_signature) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_version) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		extended_logical_unit_count) != 154);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_build_number) != 190);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		controller_mode) != 292);

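	/* BMIC identify physical device buffer layout */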
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		phys_bay_in_box) != 115);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		device_type) != 120);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		redundant_path_present_map) != 1736);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		active_path_number) != 1738);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_connector) != 1739);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_box_on_port) != 1755);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		current_queue_depth_limit) != 1796);
	BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);

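	/* admin and operational queue element limits */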
	BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);

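	/* reserved I/O slots must not consume the entire request pool */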
	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
		PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);
}