/*
 *    driver for Microsemi PQI-based storage controllers
 *    Copyright (c) 2016-2017 Microsemi Corporation
 *    Copyright (c) 2016 PMC-Sierra, Inc.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <asm/unaligned.h>
#include "smartpqi.h"
#include "smartpqi_sis.h"

#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif

#define DRIVER_VERSION		"1.2.4-065"
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		2
#define DRIVER_RELEASE		4
#define DRIVER_REVISION		65

#define DRIVER_NAME		"Microsemi PQI Driver (v" \
				DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT	"smartpqi"

#define PQI_EXTRA_SGL_MEMORY	(12 * sizeof(struct pqi_sg_descriptor))

MODULE_AUTHOR("Microsemi");
MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
	DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");

static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass);
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
	u32 bytes_requested);
static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, unsigned long timeout_secs);

/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);

enum pqi_lockup_action {
	NONE,
	REBOOT,
	PANIC
};

static enum pqi_lockup_action pqi_lockup_action = NONE;

static struct {
	enum pqi_lockup_action	action;
	char			*name;
} pqi_lockup_actions[] = {
	{
		.action = NONE,
		.name = "none",
	},
	{
		.action = REBOOT,
		.name = "reboot",
	},
	{
		.action = PANIC,
		.name = "panic",
	},
};

static unsigned int pqi_supported_event_types[] = {
	PQI_EVENT_TYPE_HOTPLUG,
	PQI_EVENT_TYPE_HARDWARE,
	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
	PQI_EVENT_TYPE_LOGICAL_DEVICE,
	PQI_EVENT_TYPE_OFA,
	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
};

static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
	pqi_disable_device_id_wildcards, int, 0644);
MODULE_PARM_DESC(disable_device_id_wildcards,
	"Disable device ID wildcards.");

static int pqi_disable_heartbeat;
module_param_named(disable_heartbeat,
	pqi_disable_heartbeat, int, 0644);
MODULE_PARM_DESC(disable_heartbeat,
	"Disable heartbeat.");

static int pqi_disable_ctrl_shutdown;
module_param_named(disable_ctrl_shutdown,
	pqi_disable_ctrl_shutdown, int, 0644);
MODULE_PARM_DESC(disable_ctrl_shutdown,
	"Disable controller shutdown when controller locked up.");

static char *pqi_lockup_action_param;
module_param_named(lockup_action,
	pqi_lockup_action_param, charp, 0644);
MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
	"\t\tSupported: none, reboot, panic\n"
	"\t\tDefault: none");

static char *raid_levels[] = {
	"RAID-0",
	"RAID-4",
	"RAID-1(1+0)",
	"RAID-5",
	"RAID-5+1",
	"RAID-ADG",
	"RAID-1(ADM)",
};

static char *pqi_raid_level_to_string(u8 raid_level)
{
	if (raid_level < ARRAY_SIZE(raid_levels))
		return raid_levels[raid_level];

	return "RAID UNKNOWN";
}

#define SA_RAID_0		0
#define SA_RAID_4		1
#define SA_RAID_1		2	/* also used for RAID 10 */
#define SA_RAID_5		3	/* also used for RAID 50 */
#define SA_RAID_51		4
#define SA_RAID_6		5	/* also used for RAID 60 */
#define SA_RAID_ADM		6	/* also used for RAID 1+0 ADM */
#define SA_RAID_MAX		SA_RAID_ADM
#define SA_RAID_UNKNOWN		0xff

static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
	pqi_prep_for_scsi_done(scmd);
	scmd->scsi_done(scmd);
}

static inline void pqi_disable_write_same(struct scsi_device *sdev)
{
	sdev->no_write_same = 1;
}

static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}

static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device;
}

static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
{
	return scsi3addr[2] != 0;
}

static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->controller_online)
		if (!sis_is_firmware_running(ctrl_info))
			pqi_take_ctrl_offline(ctrl_info);
}

static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}

static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
	struct pqi_ctrl_info *ctrl_info)
{
	return sis_read_driver_scratch(ctrl_info);
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_mode mode)
{
	sis_write_driver_scratch(ctrl_info, mode);
}

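/*
 * Request blocking: pqi_ctrl_block_requests() marks the controller as
 * blocked and asks the SCSI midlayer to stop issuing new commands;
 * pqi_ctrl_unblock_requests() reverses this, wakes any threads parked in
 * pqi_wait_if_ctrl_blocked(), and retries queued RAID bypass requests.
 */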
static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = true;
	scsi_block_requests(ctrl_info->scsi_host);
}

static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = false;
	wake_up_all(&ctrl_info->block_requests_wait);
	pqi_retry_raid_bypass_requests(ctrl_info);
	scsi_unblock_requests(ctrl_info->scsi_host);
}

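/*
 * Wait (up to timeout_msecs) for the controller to become unblocked and
 * return the time remaining; with NO_TIMEOUT the wait is unbounded and
 * NO_TIMEOUT is returned unchanged.
 */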
static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
	unsigned long timeout_msecs)
{
	unsigned long remaining_msecs;

	if (!pqi_ctrl_blocked(ctrl_info))
		return timeout_msecs;

	atomic_inc(&ctrl_info->num_blocked_threads);

	if (timeout_msecs == NO_TIMEOUT) {
		wait_event(ctrl_info->block_requests_wait,
			!pqi_ctrl_blocked(ctrl_info));
		remaining_msecs = timeout_msecs;
	} else {
		unsigned long remaining_jiffies;

		remaining_jiffies =
			wait_event_timeout(ctrl_info->block_requests_wait,
				!pqi_ctrl_blocked(ctrl_info),
				msecs_to_jiffies(timeout_msecs));
		remaining_msecs = jiffies_to_msecs(remaining_jiffies);
	}

	atomic_dec(&ctrl_info->num_blocked_threads);

	return remaining_msecs;
}

static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
	while (atomic_read(&ctrl_info->num_busy_threads) >
		atomic_read(&ctrl_info->num_blocked_threads))
		usleep_range(1000, 2000);
}

static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
{
	return device->device_offline;
}

static inline void pqi_device_reset_start(struct pqi_scsi_dev *device)
{
	device->in_reset = true;
}

static inline void pqi_device_reset_done(struct pqi_scsi_dev *device)
{
	device->in_reset = false;
}

static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
{
	return device->in_reset;
}

static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->in_ofa = true;
}

static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->in_ofa = false;
}

static inline bool pqi_ctrl_in_ofa(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->in_ofa;
}

static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
{
	device->in_remove = true;
}

static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	return device->in_remove && !ctrl_info->in_shutdown;
}

static inline void pqi_schedule_rescan_worker_with_delay(
	struct pqi_ctrl_info *ctrl_info, unsigned long delay)
{
	if (pqi_ctrl_offline(ctrl_info))
		return;
	if (pqi_ctrl_in_ofa(ctrl_info))
		return;

	schedule_delayed_work(&ctrl_info->rescan_work, delay);
}

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
}

#define PQI_RESCAN_WORK_DELAY	(10 * PQI_HZ)

static inline void pqi_schedule_rescan_worker_delayed(
	struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
}

static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->rescan_work);
}

static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->heartbeat_counter)
		return 0;

	return readl(ctrl_info->heartbeat_counter);
}

static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->soft_reset_status)
		return 0;

	return readb(ctrl_info->soft_reset_status);
}

static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info,
	u8 clear)
{
	u8 status;

	if (!ctrl_info->soft_reset_status)
		return;

	status = pqi_read_soft_reset_status(ctrl_info);
	status &= ~clear;
	writeb(status, ctrl_info->soft_reset_status);
}

static int pqi_map_single(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
	size_t buffer_length, enum dma_data_direction data_direction)
{
	dma_addr_t bus_address;

	if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
		return 0;

	bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
		data_direction);
	if (dma_mapping_error(&pci_dev->dev, bus_address))
		return -ENOMEM;

	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
	put_unaligned_le32(buffer_length, &sg_descriptor->length);
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return 0;
}

static void pqi_pci_unmap(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *descriptors, int num_descriptors,
	enum dma_data_direction data_direction)
{
	int i;

	if (data_direction == DMA_NONE)
		return;

	for (i = 0; i < num_descriptors; i++)
		dma_unmap_single(&pci_dev->dev,
			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
			get_unaligned_le32(&descriptors[i].length),
			data_direction);
}

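/*
 * Build a RAID path request for the given BMIC/CISS command and DMA-map
 * the data buffer into the request's first SG descriptor.  On success the
 * caller owns the mapping and must release it with pqi_pci_unmap() after
 * the request completes; *dir is set to the direction that was used.
 */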
static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length,
	u16 vpd_page, enum dma_data_direction *dir)
{
	u8 *cdb;
	size_t cdb_length = buffer_length;

	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le16(offsetof(struct pqi_raid_path_request,
		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le32(buffer_length, &request->buffer_length);
	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	cdb = request->cdb;

	switch (cmd) {
	case INQUIRY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = INQUIRY;
		if (vpd_page & VPD_PAGE) {
			cdb[1] = 0x1;
			cdb[2] = (u8)vpd_page;
		}
		cdb[4] = (u8)cdb_length;
		break;
	case CISS_REPORT_LOG:
	case CISS_REPORT_PHYS:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = cmd;
		if (cmd == CISS_REPORT_PHYS)
			cdb[1] = CISS_REPORT_PHYS_EXTENDED;
		else
			cdb[1] = CISS_REPORT_LOG_EXTENDED;
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case CISS_GET_RAID_MAP:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = CISS_READ;
		cdb[1] = CISS_GET_RAID_MAP;
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case SA_FLUSH_CACHE:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = BMIC_FLUSH_CACHE;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SENSE_DIAG_OPTIONS:
		cdb_length = 0;
		/* fall through */
	case BMIC_IDENTIFY_CONTROLLER:
	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = BMIC_READ;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SET_DIAG_OPTIONS:
		cdb_length = 0;
		/* fall through */
	case BMIC_WRITE_HOST_WELLNESS:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_CSMI_PASSTHRU:
		request->data_direction = SOP_BIDIRECTIONAL;
		cdb[0] = BMIC_WRITE;
		cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%02x\n",
			cmd);
		break;
	}

	switch (request->data_direction) {
	case SOP_READ_FLAG:
		*dir = DMA_FROM_DEVICE;
		break;
	case SOP_WRITE_FLAG:
		*dir = DMA_TO_DEVICE;
		break;
	case SOP_NO_DIRECTION_FLAG:
		*dir = DMA_NONE;
		break;
	default:
		*dir = DMA_BIDIRECTIONAL;
		break;
	}

	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
		buffer, buffer_length, *dir);
}

static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
{
	io_request->scmd = NULL;
	io_request->status = 0;
	io_request->error_info = NULL;
	io_request->raid_bypass = false;
}

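/*
 * Claim a free slot from the preallocated io_request pool without taking a
 * lock: a slot is owned by whoever increments its refcount from 0 to 1.
 * next_io_request_slot is only a starting hint, so reading and updating it
 * racily is harmless.
 */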
static struct pqi_io_request *pqi_alloc_io_request(
	struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_io_request *io_request;
	u16 i = ctrl_info->next_io_request_slot;	/* benignly racy */

	while (1) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_inc_return(&io_request->refcount) == 1)
			break;
		atomic_dec(&io_request->refcount);
		i = (i + 1) % ctrl_info->max_io_slots;
	}

	/* benignly racy */
	ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;

	pqi_reinit_io_request(io_request);

	return io_request;
}

static void pqi_free_io_request(struct pqi_io_request *io_request)
{
	atomic_dec(&io_request->refcount);
}

static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
	struct pqi_raid_error_info *error_info,
	unsigned long timeout_msecs)
{
	int rc;
	enum dma_data_direction dir;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		cmd, scsi3addr, buffer,
		buffer_length, vpd_page, &dir);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, error_info, timeout_msecs);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
	return rc;
}

/* Helper functions for pqi_send_scsi_raid_request */

static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, NULL, NO_TIMEOUT);
}

static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, error_info, NO_TIMEOUT);
}

static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
	struct bmic_identify_controller *buffer)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
		buffer, sizeof(*buffer));
}

static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
		buffer, buffer_length, vpd_page, NULL, NO_TIMEOUT);
}

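/*
 * Probe SCSI VPD page 0 (supported pages) to find out whether the device
 * advertises the given VPD page; used below to decide whether the
 * device-ID page can be fetched.
 */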
static bool pqi_vpd_page_supported(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u16 vpd_page)
{
	int rc;
	int i;
	int pages;
	unsigned char *buf, bufsize;

	buf = kzalloc(256, GFP_KERNEL);
	if (!buf)
		return false;

	/* Get the size of the page list first */
	rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
		VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES,
		buf, SCSI_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_unsupported;

	pages = buf[3];
	if ((pages + SCSI_VPD_HEADER_SZ) <= 255)
		bufsize = pages + SCSI_VPD_HEADER_SZ;
	else
		bufsize = 255;

	/* Get the whole VPD page list */
	rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
		VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES,
		buf, bufsize);
	if (rc != 0)
		goto exit_unsupported;

	pages = buf[3];
	for (i = 1; i <= pages; i++)
		if (buf[3 + i] == vpd_page)
			goto exit_supported;

exit_unsupported:
	kfree(buf);
	return false;

exit_supported:
	kfree(buf);
	return true;
}

static int pqi_get_device_id(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u8 *device_id, int buflen)
{
	int rc;
	unsigned char *buf;

	if (!pqi_vpd_page_supported(ctrl_info, scsi3addr, SCSI_VPD_DEVICE_ID))
		return 1; /* function not supported */

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
		VPD_PAGE | SCSI_VPD_DEVICE_ID,
		buf, 64);
	if (rc == 0) {
		if (buflen > 16)
			buflen = 16;
		memcpy(device_id, &buf[SCSI_VPD_DEVICE_ID_IDX], buflen);
	}

	kfree(buf);

	return rc;
}

static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *buffer,
	size_t buffer_length)
{
	int rc;
	enum dma_data_direction dir;
	u16 bmic_device_index;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &dir);
	if (rc)
		return rc;

	bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
	request.cdb[2] = (u8)bmic_device_index;
	request.cdb[9] = (u8)(bmic_device_index >> 8);

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
	return rc;
}

static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
	enum bmic_flush_cache_shutdown_event shutdown_event)
{
	int rc;
	struct bmic_flush_cache *flush_cache;

	/*
	 * Don't bother trying to flush the cache if the controller is
	 * locked up.
	 */
	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;

	flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
	if (!flush_cache)
		return -ENOMEM;

	flush_cache->shutdown_event = shutdown_event;

	rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
		sizeof(*flush_cache));

	kfree(flush_cache);

	return rc;
}

int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
	struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
		buffer, buffer_length, error_info);
}

#define PQI_FETCH_PTRAID_DATA		(1UL << 31)

static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_diag_options *diag;

	diag = kzalloc(sizeof(*diag), GFP_KERNEL);
	if (!diag)
		return -ENOMEM;

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
		diag, sizeof(*diag));
	if (rc)
		goto out;

	diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS,
		diag, sizeof(*diag));
out:
	kfree(diag);

	return rc;
}

static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
	void *buffer, size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
		buffer, buffer_length);
}

#pragma pack(1)

struct bmic_host_wellness_driver_version {
	u8	start_tag[4];
	u8	driver_version_tag[2];
	__le16	driver_version_length;
	char	driver_version[32];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

static int pqi_write_driver_version_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_driver_version *buffer;
	size_t buffer_length;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->driver_version_tag[0] = 'D';
	buffer->driver_version_tag[1] = 'V';
	put_unaligned_le16(sizeof(buffer->driver_version),
		&buffer->driver_version_length);
	strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
		sizeof(buffer->driver_version) - 1);
	buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#pragma pack(1)

struct bmic_host_wellness_time {
	u8	start_tag[4];
	u8	time_tag[2];
	__le16	time_length;
	u8	time[8];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

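/*
 * The host wellness "TD" payload below carries the current host time as
 * packed BCD: hour, minute, second, a reserved byte, month, day, century,
 * and year-within-century, followed by the "DW" don't-write tag.
 */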
static int pqi_write_current_time_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_time *buffer;
	size_t buffer_length;
	time64_t local_time;
	unsigned int year;
	struct tm tm;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->time_tag[0] = 'T';
	buffer->time_tag[1] = 'D';
	put_unaligned_le16(sizeof(buffer->time),
		&buffer->time_length);

	local_time = ktime_get_real_seconds();
	time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
	year = tm.tm_year + 1900;

	buffer->time[0] = bin2bcd(tm.tm_hour);
	buffer->time[1] = bin2bcd(tm.tm_min);
	buffer->time[2] = bin2bcd(tm.tm_sec);
	buffer->time[3] = 0;
	buffer->time[4] = bin2bcd(tm.tm_mon + 1);
	buffer->time[5] = bin2bcd(tm.tm_mday);
	buffer->time[6] = bin2bcd(year / 100);
	buffer->time[7] = bin2bcd(year % 100);

	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * PQI_HZ)

static void pqi_update_time_worker(struct work_struct *work)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		update_time_work);

	if (pqi_ctrl_offline(ctrl_info))
		return;

	rc = pqi_write_current_time_to_host_wellness(ctrl_info);
	if (rc)
		dev_warn(&ctrl_info->pci_dev->dev,
			"error updating time on controller\n");

	schedule_delayed_work(&ctrl_info->update_time_work,
		PQI_UPDATE_TIME_WORK_INTERVAL);
}

static inline void pqi_schedule_update_time_worker(
	struct pqi_ctrl_info *ctrl_info)
{
	schedule_delayed_work(&ctrl_info->update_time_work, 0);
}

static inline void pqi_cancel_update_time_worker(
	struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->update_time_work);
}

static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void *buffer, size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer,
		buffer_length);
}

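/*
 * Issue CISS_REPORT_PHYS/LOG twice: first with a header-sized buffer just
 * to learn the list length, then with a buffer big enough for the whole
 * list.  If the list grows between the two calls, retry with the larger
 * size.
 */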
static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void **buffer)
{
	int rc;
	size_t lun_list_length;
	size_t lun_data_length;
	size_t new_lun_list_length;
	void *lun_data = NULL;
	struct report_lun_header *report_lun_header;

	report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
	if (!report_lun_header) {
		rc = -ENOMEM;
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
		sizeof(*report_lun_header));
	if (rc)
		goto out;

	lun_list_length = get_unaligned_be32(&report_lun_header->list_length);

again:
	lun_data_length = sizeof(struct report_lun_header) + lun_list_length;

	lun_data = kmalloc(lun_data_length, GFP_KERNEL);
	if (!lun_data) {
		rc = -ENOMEM;
		goto out;
	}

	if (lun_list_length == 0) {
		memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
	if (rc)
		goto out;

	new_lun_list_length = get_unaligned_be32(
		&((struct report_lun_header *)lun_data)->list_length);

	if (new_lun_list_length > lun_list_length) {
		lun_list_length = new_lun_list_length;
		kfree(lun_data);
		goto again;
	}

out:
	kfree(report_lun_header);

	if (rc) {
		kfree(lun_data);
		lun_data = NULL;
	}

	*buffer = lun_data;

	return rc;
}

static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
	void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
		buffer);
}

static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
	void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
}

static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
	struct report_phys_lun_extended **physdev_list,
	struct report_log_lun_extended **logdev_list)
{
	int rc;
	size_t logdev_list_length;
	size_t logdev_data_length;
	struct report_log_lun_extended *internal_logdev_list;
	struct report_log_lun_extended *logdev_data;
	struct report_lun_header report_lun_header;

	rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report physical LUNs failed\n");

	rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report logical LUNs failed\n");

	/*
	 * Tack the controller itself onto the end of the logical device list.
	 */

	logdev_data = *logdev_list;

	if (logdev_data) {
		logdev_list_length =
			get_unaligned_be32(&logdev_data->header.list_length);
	} else {
		memset(&report_lun_header, 0, sizeof(report_lun_header));
		logdev_data =
			(struct report_log_lun_extended *)&report_lun_header;
		logdev_list_length = 0;
	}

	logdev_data_length = sizeof(struct report_lun_header) +
		logdev_list_length;

	internal_logdev_list = kmalloc(logdev_data_length +
		sizeof(struct report_log_lun_extended), GFP_KERNEL);
	if (!internal_logdev_list) {
		kfree(*logdev_list);
		*logdev_list = NULL;
		return -ENOMEM;
	}

	memcpy(internal_logdev_list, logdev_data, logdev_data_length);
	memset((u8 *)internal_logdev_list + logdev_data_length, 0,
		sizeof(struct report_log_lun_extended_entry));
	put_unaligned_be32(logdev_list_length +
		sizeof(struct report_log_lun_extended_entry),
		&internal_logdev_list->header.list_length);

	kfree(*logdev_list);
	*logdev_list = internal_logdev_list;

	return 0;
}

static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

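/*
 * Derive the SCSI bus/target/lun presented to the midlayer from the 4-byte
 * LUN ID: the controller itself sits on PQI_HBA_BUS, internal logical
 * volumes on PQI_RAID_VOLUME_BUS, and external RAID volumes on
 * PQI_EXTERNAL_RAID_VOLUME_BUS; other physical devices are addressed later
 * by the SAS transport layer.
 */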
static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
{
	u8 *scsi3addr;
	u32 lunid;
	int bus;
	int target;
	int lun;

	scsi3addr = device->scsi3addr;
	lunid = get_unaligned_le32(scsi3addr);

	if (pqi_is_hba_lunid(scsi3addr)) {
		/* The specified device is the controller. */
		pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
		device->target_lun_valid = true;
		return;
	}

	if (pqi_is_logical_device(device)) {
		if (device->is_external_raid_device) {
			bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
			target = (lunid >> 16) & 0x3fff;
			lun = lunid & 0xff;
		} else {
			bus = PQI_RAID_VOLUME_BUS;
			target = 0;
			lun = lunid & 0x3fff;
		}
		pqi_set_bus_target_lun(device, bus, target, lun);
		device->target_lun_valid = true;
		return;
	}

	/*
	 * Defer target and LUN assignment for non-controller physical devices
	 * because the SAS transport layer will make these assignments later.
	 */
	pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
}

static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 raid_level;
	u8 *buffer;

	raid_level = SA_RAID_UNKNOWN;

	buffer = kmalloc(64, GFP_KERNEL);
	if (buffer) {
		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
			VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
		if (rc == 0) {
			raid_level = buffer[8];
			if (raid_level > SA_RAID_MAX)
				raid_level = SA_RAID_UNKNOWN;
		}
		kfree(buffer);
	}

	device->raid_level = raid_level;
}

static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct raid_map *raid_map)
{
	char *err_msg;
	u32 raid_map_size;
	u32 r5or6_blocks_per_row;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size < offsetof(struct raid_map, disk_data)) {
		err_msg = "RAID map too small";
		goto bad_raid_map;
	}

	if (device->raid_level == SA_RAID_1) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
			err_msg = "invalid RAID-1 map";
			goto bad_raid_map;
		}
	} else if (device->raid_level == SA_RAID_ADM) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
			err_msg = "invalid RAID-1(ADM) map";
			goto bad_raid_map;
		}
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) &&
		get_unaligned_le16(&raid_map->layout_map_count) > 1) {
		/* RAID 50/60 */
		r5or6_blocks_per_row =
			get_unaligned_le16(&raid_map->strip_size) *
			get_unaligned_le16(&raid_map->data_disks_per_row);
		if (r5or6_blocks_per_row == 0) {
			err_msg = "invalid RAID-5 or RAID-6 map";
			goto bad_raid_map;
		}
	}

	return 0;

bad_raid_map:
	dev_warn(&ctrl_info->pci_dev->dev,
		"logical device %08x%08x %s\n",
		*((u32 *)&device->scsi3addr),
		*((u32 *)&device->scsi3addr[4]), err_msg);

	return -EINVAL;
}

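/*
 * Fetch the RAID map for a logical volume.  A fixed-size map is requested
 * first; if the controller reports a larger structure_size, the buffer is
 * reallocated to that size and the map is fetched again and cross-checked.
 */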
static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u32 raid_map_size;
	struct raid_map *raid_map;

	raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
	if (!raid_map)
		return -ENOMEM;

	rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
		device->scsi3addr, raid_map, sizeof(*raid_map),
		0, NULL, NO_TIMEOUT);

	if (rc)
		goto error;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size > sizeof(*raid_map)) {

		kfree(raid_map);

		raid_map = kmalloc(raid_map_size, GFP_KERNEL);
		if (!raid_map)
			return -ENOMEM;

		rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
			device->scsi3addr, raid_map, raid_map_size,
			0, NULL, NO_TIMEOUT);
		if (rc)
			goto error;

		if (get_unaligned_le32(&raid_map->structure_size)
			!= raid_map_size) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"Requested %d bytes, received %d bytes",
				raid_map_size,
				get_unaligned_le32(&raid_map->structure_size));
			rc = -EINVAL;
			goto error;
		}
	}

	rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
	if (rc)
		goto error;

	device->raid_map = raid_map;

	return 0;

error:
	kfree(raid_map);

	return rc;
}

static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;
	u8 bypass_status;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
	if (rc)
		goto out;

#define RAID_BYPASS_STATUS	4
#define RAID_BYPASS_CONFIGURED	0x1
#define RAID_BYPASS_ENABLED	0x2

	bypass_status = buffer[RAID_BYPASS_STATUS];
	device->raid_bypass_configured =
		(bypass_status & RAID_BYPASS_CONFIGURED) != 0;
	if (device->raid_bypass_configured &&
		(bypass_status & RAID_BYPASS_ENABLED) &&
		pqi_get_raid_map(ctrl_info, device) == 0)
		device->raid_bypass_enabled = true;

out:
	kfree(buffer);
}

/*
 * Use vendor-specific VPD to determine online/offline status of a volume.
 */

static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	size_t page_length;
	u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
	bool volume_offline = true;
	u32 volume_flags;
	struct ciss_vpd_logical_volume_status *vpd;

	vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
	if (!vpd)
		goto no_buffer;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
	if (rc)
		goto out;

	if (vpd->page_code != CISS_VPD_LV_STATUS)
		goto out;

	page_length = offsetof(struct ciss_vpd_logical_volume_status,
		volume_status) + vpd->page_length;
	if (page_length < sizeof(*vpd))
		goto out;

	volume_status = vpd->volume_status;
	volume_flags = get_unaligned_be32(&vpd->flags);
	volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;

out:
	kfree(vpd);
no_buffer:
	device->volume_status = volume_status;
	device->volume_offline = volume_offline;
}

#define PQI_INQUIRY_PAGE0_RETRIES	3

static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;
	unsigned int retries;

	if (device->is_expander_smp_device)
		return 0;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Send an inquiry to the device to see what it is. */
	for (retries = 0;;) {
		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0,
			buffer, 64);
		if (rc == 0)
			break;
		if (pqi_is_logical_device(device) ||
			rc != PQI_CMD_STATUS_ABORTED ||
			++retries > PQI_INQUIRY_PAGE0_RETRIES)
			goto out;
	}

	scsi_sanitize_inquiry_string(&buffer[8], 8);
	scsi_sanitize_inquiry_string(&buffer[16], 16);

	device->devtype = buffer[0] & 0x1f;
	memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
	memcpy(device->model, &buffer[16], sizeof(device->model));

	if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
		if (device->is_external_raid_device) {
			device->raid_level = SA_RAID_UNKNOWN;
			device->volume_status = CISS_LV_OK;
			device->volume_offline = false;
		} else {
			pqi_get_raid_level(ctrl_info, device);
			pqi_get_raid_bypass_status(ctrl_info, device);
			pqi_get_volume_status(ctrl_info, device);
		}
	}

	if (pqi_get_device_id(ctrl_info, device->scsi3addr,
		device->unique_id, sizeof(device->unique_id)) < 0)
		dev_warn(&ctrl_info->pci_dev->dev,
			"Can't get device id for scsi %d:%d:%d:%d\n",
			ctrl_info->scsi_host->host_no,
			device->bus, device->target,
			device->lun);

out:
	kfree(buffer);

	return rc;
}

static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	memset(id_phys, 0, sizeof(*id_phys));

	rc = pqi_identify_physical_device(ctrl_info, device,
		id_phys, sizeof(*id_phys));
	if (rc) {
		device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
		return;
	}

	device->queue_depth =
		get_unaligned_le16(&id_phys->current_queue_depth_limit);
	device->device_type = id_phys->device_type;
	device->active_path_index = id_phys->active_path_number;
	device->path_map = id_phys->redundant_path_present_map;
	memcpy(&device->box,
		&id_phys->alternate_paths_phys_box_on_port,
		sizeof(device->box));
	memcpy(&device->phys_connector,
		&id_phys->alternate_paths_phys_connector,
		sizeof(device->phys_connector));
	device->bay = id_phys->phys_bay_in_box;
}

static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	char *status;
	static const char unknown_state_str[] =
		"Volume is in an unknown state (%u)";
	char unknown_state_buffer[sizeof(unknown_state_str) + 10];

	switch (device->volume_status) {
	case CISS_LV_OK:
		status = "Volume online";
		break;
	case CISS_LV_FAILED:
		status = "Volume failed";
		break;
	case CISS_LV_NOT_CONFIGURED:
		status = "Volume not configured";
		break;
	case CISS_LV_DEGRADED:
		status = "Volume degraded";
		break;
	case CISS_LV_READY_FOR_RECOVERY:
		status = "Volume ready for recovery operation";
		break;
	case CISS_LV_UNDERGOING_RECOVERY:
		status = "Volume undergoing recovery";
		break;
	case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
		status = "Wrong physical drive was replaced";
		break;
	case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
		status = "A physical drive not properly connected";
		break;
	case CISS_LV_HARDWARE_OVERHEATING:
		status = "Hardware is overheating";
		break;
	case CISS_LV_HARDWARE_HAS_OVERHEATED:
		status = "Hardware has overheated";
		break;
	case CISS_LV_UNDERGOING_EXPANSION:
		status = "Volume undergoing expansion";
		break;
	case CISS_LV_NOT_AVAILABLE:
		status = "Volume waiting for transforming volume";
		break;
	case CISS_LV_QUEUED_FOR_EXPANSION:
		status = "Volume queued for expansion";
		break;
	case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
		status = "Volume disabled due to SCSI ID conflict";
		break;
	case CISS_LV_EJECTED:
		status = "Volume has been ejected";
		break;
	case CISS_LV_UNDERGOING_ERASE:
		status = "Volume undergoing background erase";
		break;
	case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
		status = "Volume ready for predictive spare rebuild";
		break;
	case CISS_LV_UNDERGOING_RPI:
		status = "Volume undergoing rapid parity initialization";
		break;
	case CISS_LV_PENDING_RPI:
		status = "Volume queued for rapid parity initialization";
		break;
	case CISS_LV_ENCRYPTED_NO_KEY:
		status = "Encrypted volume inaccessible - key not present";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION:
		status = "Volume undergoing encryption process";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
		status = "Volume undergoing encryption re-keying process";
		break;
	case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		status = "Volume encrypted but encryption is disabled";
		break;
	case CISS_LV_PENDING_ENCRYPTION:
		status = "Volume pending migration to encrypted state";
		break;
	case CISS_LV_PENDING_ENCRYPTION_REKEYING:
		status = "Volume pending encryption rekeying";
		break;
	case CISS_LV_NOT_SUPPORTED:
		status = "Volume not supported on this controller";
		break;
	case CISS_LV_STATUS_UNAVAILABLE:
		status = "Volume status not available";
		break;
	default:
		snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
			unknown_state_str, device->volume_status);
		status = unknown_state_buffer;
		break;
	}

	dev_info(&ctrl_info->pci_dev->dev,
		"scsi %d:%d:%d:%d %s\n",
		ctrl_info->scsi_host->host_no,
		device->bus, device->target, device->lun, status);
}

static void pqi_rescan_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		rescan_work);

	pqi_scan_scsi_devices(ctrl_info);
}

static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	if (pqi_is_logical_device(device))
		rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
			device->target, device->lun);
	else
		rc = pqi_add_sas_device(ctrl_info->sas_host, device);

	return rc;
}

#define PQI_PENDING_IO_TIMEOUT_SECS	20

static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	pqi_device_remove_start(device);

	rc = pqi_device_wait_for_pending_io(ctrl_info, device,
		PQI_PENDING_IO_TIMEOUT_SECS);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"scsi %d:%d:%d:%d removing device with %d outstanding commands\n",
			ctrl_info->scsi_host->host_no, device->bus,
			device->target, device->lun,
			atomic_read(&device->scsi_cmds_outstanding));

	if (pqi_is_logical_device(device))
		scsi_remove_device(device->sdev);
	else
		pqi_remove_sas_device(device);
}

/* Assumes the SCSI device list lock is held. */

static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
	int bus, int target, int lun)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry)
		if (device->bus == bus && device->target == target &&
			device->lun == lun)
			return device;

	return NULL;
}

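/*
 * Two devices are considered the same if they occupy the same position
 * (physical vs. logical) and, for physical devices, share the same WWID,
 * or, for logical devices, the same volume ID.
 */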
static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
	struct pqi_scsi_dev *dev2)
{
	if (dev1->is_physical_device != dev2->is_physical_device)
		return false;

	if (dev1->is_physical_device)
		return dev1->wwid == dev2->wwid;

	return memcmp(dev1->volume_id, dev2->volume_id,
		sizeof(dev1->volume_id)) == 0;
}

enum pqi_find_result {
	DEVICE_NOT_FOUND,
	DEVICE_CHANGED,
	DEVICE_SAME,
};

static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device_to_find,
	struct pqi_scsi_dev **matching_device)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
			device->scsi3addr)) {
			*matching_device = device;
			if (pqi_device_equal(device_to_find, device)) {
				if (device_to_find->volume_offline)
					return DEVICE_CHANGED;
				return DEVICE_SAME;
			}
			return DEVICE_CHANGED;
		}
	}

	return DEVICE_NOT_FOUND;
}

static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
{
	if (device->is_expander_smp_device)
		return "Enclosure SMP ";

	return scsi_device_type(device->devtype);
}

#define PQI_DEV_INFO_BUFFER_LENGTH	128

static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
	char *action, struct pqi_scsi_dev *device)
{
	ssize_t count;
	char buffer[PQI_DEV_INFO_BUFFER_LENGTH];

	count = snprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
		"%d:%d:", ctrl_info->scsi_host->host_no, device->bus);

	if (device->target_lun_valid)
		count += snprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"%d:%d",
			device->target,
			device->lun);
	else
		count += snprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"-:-");

	if (pqi_is_logical_device(device))
		count += snprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			" %08x%08x",
			*((u32 *)&device->scsi3addr),
			*((u32 *)&device->scsi3addr[4]));
	else
		count += snprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			" %016llx", device->sas_address);

	count += snprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
		" %s %.8s %.16s ",
		pqi_device_type(device),
		device->vendor,
		device->model);

	if (pqi_is_logical_device(device)) {
		if (device->devtype == TYPE_DISK)
			count += snprintf(buffer + count,
				PQI_DEV_INFO_BUFFER_LENGTH - count,
				"SSDSmartPathCap%c En%c %-12s",
				device->raid_bypass_configured ? '+' : '-',
				device->raid_bypass_enabled ? '+' : '-',
				pqi_raid_level_to_string(device->raid_level));
	} else {
		count += snprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"AIO%c", device->aio_enabled ? '+' : '-');
		if (device->devtype == TYPE_DISK ||
			device->devtype == TYPE_ZBC)
			count += snprintf(buffer + count,
				PQI_DEV_INFO_BUFFER_LENGTH - count,
				" qd=%-6d", device->queue_depth);
	}

	dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
}

/* Assumes the SCSI device list lock is held. */

static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
	struct pqi_scsi_dev *new_device)
{
	existing_device->devtype = new_device->devtype;
	existing_device->device_type = new_device->device_type;
	existing_device->bus = new_device->bus;
	if (new_device->target_lun_valid) {
		existing_device->target = new_device->target;
1703 existing_device->lun = new_device->lun;
1704 existing_device->target_lun_valid = true;
1705 }
1706
1707 /* By definition, the scsi3addr and wwid fields are already the same. */
1708
1709 existing_device->is_physical_device = new_device->is_physical_device;
Kevin Barnettbd10cf02017-05-03 18:54:12 -05001710 existing_device->is_external_raid_device =
1711 new_device->is_external_raid_device;
Don Brace3d46a592018-12-07 16:30:05 -06001712 existing_device->is_expander_smp_device =
1713 new_device->is_expander_smp_device;
Kevin Barnett6c223762016-06-27 16:41:00 -05001714 existing_device->aio_enabled = new_device->aio_enabled;
1715 memcpy(existing_device->vendor, new_device->vendor,
1716 sizeof(existing_device->vendor));
1717 memcpy(existing_device->model, new_device->model,
1718 sizeof(existing_device->model));
1719 existing_device->sas_address = new_device->sas_address;
1720 existing_device->raid_level = new_device->raid_level;
1721 existing_device->queue_depth = new_device->queue_depth;
1722 existing_device->aio_handle = new_device->aio_handle;
1723 existing_device->volume_status = new_device->volume_status;
1724 existing_device->active_path_index = new_device->active_path_index;
1725 existing_device->path_map = new_device->path_map;
1726 existing_device->bay = new_device->bay;
1727 memcpy(existing_device->box, new_device->box,
1728 sizeof(existing_device->box));
1729 memcpy(existing_device->phys_connector, new_device->phys_connector,
1730 sizeof(existing_device->phys_connector));
Kevin Barnett6c223762016-06-27 16:41:00 -05001731 existing_device->offload_to_mirror = 0;
1732 kfree(existing_device->raid_map);
1733 existing_device->raid_map = new_device->raid_map;
Kevin Barnett588a63fe2017-05-03 18:55:25 -05001734 existing_device->raid_bypass_configured =
1735 new_device->raid_bypass_configured;
1736 existing_device->raid_bypass_enabled =
1737 new_device->raid_bypass_enabled;
Dave Carrolla9a68102018-12-07 16:29:37 -06001738 existing_device->device_offline = false;
Kevin Barnett6c223762016-06-27 16:41:00 -05001739
1740 /* Prevent the raid_map (now owned by existing_device) from being freed later. */
1741 new_device->raid_map = NULL;
1742}
1743
1744static inline void pqi_free_device(struct pqi_scsi_dev *device)
1745{
1746 if (device) {
1747 kfree(device->raid_map);
1748 kfree(device);
1749 }
1750}
1751
1752/*
1753 * Called when exposing a new device to the OS fails in order to re-adjust
1754 * our internal SCSI device list to match the SCSI ML's view.
1755 */
1756
1757static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
1758 struct pqi_scsi_dev *device)
1759{
1760 unsigned long flags;
1761
1762 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1763 list_del(&device->scsi_device_list_entry);
1764 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1765
1766 /* Allow the device structure to be freed later. */
1767 device->keep_device = false;
1768}
1769
Don Brace3d46a592018-12-07 16:30:05 -06001770static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
1771{
1772 if (device->is_expander_smp_device)
1773 return device->sas_port != NULL;
1774
1775 return device->sdev != NULL;
1776}
1777
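/*
 * Reconciles the driver's internal device list with a freshly reported
 * list from the controller: every existing entry is first presumed gone,
 * each newly reported device is classified as SAME, CHANGED, or NOT
 * FOUND, and only after the spinlock is dropped are departed devices
 * removed from, and new devices exposed to, the SCSI midlayer.
 */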
Kevin Barnett6c223762016-06-27 16:41:00 -05001778static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
1779 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
1780{
1781 int rc;
1782 unsigned int i;
1783 unsigned long flags;
1784 enum pqi_find_result find_result;
1785 struct pqi_scsi_dev *device;
1786 struct pqi_scsi_dev *next;
1787 struct pqi_scsi_dev *matching_device;
Kevin Barnett8a994a02017-05-03 18:55:37 -05001788 LIST_HEAD(add_list);
1789 LIST_HEAD(delete_list);
Kevin Barnett6c223762016-06-27 16:41:00 -05001790
1791 /*
1792 * The idea here is to do as little work as possible while holding the
1793 * spinlock. That's why we go to great pains to defer anything other
1794 * than updating the internal device list until after we release the
1795 * spinlock.
1796 */
1797
1798 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1799
1800 /* Assume that all devices in the existing list have gone away. */
1801 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1802 scsi_device_list_entry)
1803 device->device_gone = true;
1804
1805 for (i = 0; i < num_new_devices; i++) {
1806 device = new_device_list[i];
1807
1808 find_result = pqi_scsi_find_entry(ctrl_info, device,
1809 &matching_device);
1810
1811 switch (find_result) {
1812 case DEVICE_SAME:
1813 /*
1814 * The newly found device is already in the existing
1815 * device list.
1816 */
1817 device->new_device = false;
1818 matching_device->device_gone = false;
1819 pqi_scsi_update_device(matching_device, device);
1820 break;
1821 case DEVICE_NOT_FOUND:
1822 /*
1823 * The newly found device is NOT in the existing device
1824 * list.
1825 */
1826 device->new_device = true;
1827 break;
1828 case DEVICE_CHANGED:
1829 /*
1830 * The original device has gone away and we need to add
1831 * the new device.
1832 */
1833 device->new_device = true;
1834 break;
Kevin Barnett6c223762016-06-27 16:41:00 -05001835 }
1836 }
1837
1838 /* Process all devices that have gone away. */
1839 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
1840 scsi_device_list_entry) {
1841 if (device->device_gone) {
1842 list_del(&device->scsi_device_list_entry);
1843 list_add_tail(&device->delete_list_entry, &delete_list);
1844 }
1845 }
1846
1847 /* Process all new devices. */
1848 for (i = 0; i < num_new_devices; i++) {
1849 device = new_device_list[i];
1850 if (!device->new_device)
1851 continue;
1852 if (device->volume_offline)
1853 continue;
1854 list_add_tail(&device->scsi_device_list_entry,
1855 &ctrl_info->scsi_device_list);
1856 list_add_tail(&device->add_list_entry, &add_list);
1857 /* To prevent this device structure from being freed later. */
1858 device->keep_device = true;
1859 }
1860
Kevin Barnett6c223762016-06-27 16:41:00 -05001861 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1862
Mahesh Rajashekhara4fd22c12018-12-18 17:39:07 -06001863 if (pqi_ctrl_in_ofa(ctrl_info))
1864 pqi_ctrl_ofa_done(ctrl_info);
1865
Kevin Barnett6c223762016-06-27 16:41:00 -05001866 /* Remove all devices that have gone away. */
1867 list_for_each_entry_safe(device, next, &delete_list,
1868 delete_list_entry) {
Kevin Barnett6c223762016-06-27 16:41:00 -05001869 if (device->volume_offline) {
1870 pqi_dev_info(ctrl_info, "offline", device);
1871 pqi_show_volume_status(ctrl_info, device);
1872 } else {
1873 pqi_dev_info(ctrl_info, "removed", device);
1874 }
Don Brace3d46a592018-12-07 16:30:05 -06001875 if (pqi_is_device_added(device))
Kevin Barnett6de783f2017-05-03 18:55:19 -05001876 pqi_remove_device(ctrl_info, device);
Kevin Barnett6c223762016-06-27 16:41:00 -05001877 list_del(&device->delete_list_entry);
1878 pqi_free_device(device);
1879 }
1880
1881 /*
1882 * Notify the SCSI ML if the queue depth of any existing device has
1883 * changed.
1884 */
1885 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1886 scsi_device_list_entry) {
1887 if (device->sdev && device->queue_depth !=
1888 device->advertised_queue_depth) {
1889 device->advertised_queue_depth = device->queue_depth;
1890 scsi_change_queue_depth(device->sdev,
1891 device->advertised_queue_depth);
1892 }
1893 }
1894
1895 /* Expose any new devices. */
1896 list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
Don Brace3d46a592018-12-07 16:30:05 -06001897 if (!pqi_is_device_added(device)) {
Kevin Barnett6de783f2017-05-03 18:55:19 -05001898 pqi_dev_info(ctrl_info, "added", device);
Kevin Barnett6c223762016-06-27 16:41:00 -05001899 rc = pqi_add_device(ctrl_info, device);
1900 if (rc) {
1901 dev_warn(&ctrl_info->pci_dev->dev,
1902 "scsi %d:%d:%d:%d addition failed, device not added\n",
1903 ctrl_info->scsi_host->host_no,
1904 device->bus, device->target,
1905 device->lun);
1906 pqi_fixup_botched_add(ctrl_info, device);
Kevin Barnett6c223762016-06-27 16:41:00 -05001907 }
1908 }
Kevin Barnett6c223762016-06-27 16:41:00 -05001909 }
1910}
1911
1912static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
1913{
Don Brace3d46a592018-12-07 16:30:05 -06001914 bool is_supported;
1915
1916 if (device->is_expander_smp_device)
1917 return true;
1918
1919 is_supported = false;
Kevin Barnett6c223762016-06-27 16:41:00 -05001920
1921 switch (device->devtype) {
1922 case TYPE_DISK:
1923 case TYPE_ZBC:
1924 case TYPE_TAPE:
1925 case TYPE_MEDIUM_CHANGER:
1926 case TYPE_ENCLOSURE:
1927 is_supported = true;
1928 break;
1929 case TYPE_RAID:
1930 /*
1931 * Only support the HBA controller itself as a RAID
1932 * controller. If it's a RAID controller other than
Kevin Barnett376fb882017-05-03 18:54:43 -05001933 * the HBA itself (an external RAID controller, for
1934 * example), we don't support it.
Kevin Barnett6c223762016-06-27 16:41:00 -05001935 */
1936 if (pqi_is_hba_lunid(device->scsi3addr))
1937 is_supported = true;
1938 break;
1939 }
1940
1941 return is_supported;
1942}
1943
Kevin Barnett94086f52017-05-03 18:54:31 -05001944static inline bool pqi_skip_device(u8 *scsi3addr)
Kevin Barnett6c223762016-06-27 16:41:00 -05001945{
Kevin Barnett94086f52017-05-03 18:54:31 -05001946 /* Ignore all masked devices. */
1947 if (MASKED_DEVICE(scsi3addr))
Kevin Barnett6c223762016-06-27 16:41:00 -05001948 return true;
Kevin Barnett6c223762016-06-27 16:41:00 -05001949
1950 return false;
1951}
1952
Don Brace3d46a592018-12-07 16:30:05 -06001953static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device)
1954{
1955 if (!device->is_physical_device)
1956 return false;
1957
1958 if (device->is_expander_smp_device)
1959 return true;
1960
1961 switch (device->devtype) {
1962 case TYPE_DISK:
1963 case TYPE_ZBC:
1964 case TYPE_ENCLOSURE:
1965 return true;
1966 }
1967
1968 return false;
1969}
1970
Dave Carrollcd128242018-12-07 16:28:47 -06001971static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
1972{
1973 return !device->is_physical_device ||
1974 !pqi_skip_device(device->scsi3addr);
1975}
1976
Kevin Barnett6c223762016-06-27 16:41:00 -05001977static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1978{
1979 int i;
1980 int rc;
Kevin Barnett8a994a02017-05-03 18:55:37 -05001981 LIST_HEAD(new_device_list_head);
Kevin Barnett6c223762016-06-27 16:41:00 -05001982 struct report_phys_lun_extended *physdev_list = NULL;
1983 struct report_log_lun_extended *logdev_list = NULL;
1984 struct report_phys_lun_extended_entry *phys_lun_ext_entry;
1985 struct report_log_lun_extended_entry *log_lun_ext_entry;
1986 struct bmic_identify_physical_device *id_phys = NULL;
1987 u32 num_physicals;
1988 u32 num_logicals;
1989 struct pqi_scsi_dev **new_device_list = NULL;
1990 struct pqi_scsi_dev *device;
1991 struct pqi_scsi_dev *next;
1992 unsigned int num_new_devices;
1993 unsigned int num_valid_devices;
1994 bool is_physical_device;
1995 u8 *scsi3addr;
1996 static char *out_of_memory_msg =
Kevin Barnett6de783f2017-05-03 18:55:19 -05001997 "failed to allocate memory, device discovery stopped";
Kevin Barnett6c223762016-06-27 16:41:00 -05001998
Kevin Barnett6c223762016-06-27 16:41:00 -05001999 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
2000 if (rc)
2001 goto out;
2002
2003 if (physdev_list)
2004 num_physicals =
2005 get_unaligned_be32(&physdev_list->header.list_length)
2006 / sizeof(physdev_list->lun_entries[0]);
2007 else
2008 num_physicals = 0;
2009
2010 if (logdev_list)
2011 num_logicals =
2012 get_unaligned_be32(&logdev_list->header.list_length)
2013 / sizeof(logdev_list->lun_entries[0]);
2014 else
2015 num_logicals = 0;
2016
2017 if (num_physicals) {
2018 /*
2019 * We need this buffer for calls to pqi_get_physical_disk_info()
2020 * below. We allocate it here instead of inside
2021 * pqi_get_physical_disk_info() because it's a fairly large
2022 * buffer.
2023 */
2024 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
2025 if (!id_phys) {
2026 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2027 out_of_memory_msg);
2028 rc = -ENOMEM;
2029 goto out;
2030 }
2031 }
2032
2033 num_new_devices = num_physicals + num_logicals;
2034
Kees Cook6da2ec52018-06-12 13:55:00 -07002035 new_device_list = kmalloc_array(num_new_devices,
2036 sizeof(*new_device_list),
2037 GFP_KERNEL);
Kevin Barnett6c223762016-06-27 16:41:00 -05002038 if (!new_device_list) {
2039 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
2040 rc = -ENOMEM;
2041 goto out;
2042 }
2043
2044 for (i = 0; i < num_new_devices; i++) {
2045 device = kzalloc(sizeof(*device), GFP_KERNEL);
2046 if (!device) {
2047 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2048 out_of_memory_msg);
2049 rc = -ENOMEM;
2050 goto out;
2051 }
2052 list_add_tail(&device->new_device_list_entry,
2053 &new_device_list_head);
2054 }
2055
2056 device = NULL;
2057 num_valid_devices = 0;
2058
2059 for (i = 0; i < num_new_devices; i++) {
2060
2061 if (i < num_physicals) {
2062 is_physical_device = true;
2063 phys_lun_ext_entry = &physdev_list->lun_entries[i];
2064 log_lun_ext_entry = NULL;
2065 scsi3addr = phys_lun_ext_entry->lunid;
2066 } else {
2067 is_physical_device = false;
2068 phys_lun_ext_entry = NULL;
2069 log_lun_ext_entry =
2070 &logdev_list->lun_entries[i - num_physicals];
2071 scsi3addr = log_lun_ext_entry->lunid;
2072 }
2073
Kevin Barnett94086f52017-05-03 18:54:31 -05002074 if (is_physical_device && pqi_skip_device(scsi3addr))
Kevin Barnett6c223762016-06-27 16:41:00 -05002075 continue;
2076
2077 if (device)
2078 device = list_next_entry(device, new_device_list_entry);
2079 else
2080 device = list_first_entry(&new_device_list_head,
2081 struct pqi_scsi_dev, new_device_list_entry);
2082
2083 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
2084 device->is_physical_device = is_physical_device;
Don Brace3d46a592018-12-07 16:30:05 -06002085 if (is_physical_device) {
2086 if (phys_lun_ext_entry->device_type ==
2087 SA_EXPANDER_SMP_DEVICE)
2088 device->is_expander_smp_device = true;
2089 } else {
Kevin Barnettbd10cf02017-05-03 18:54:12 -05002090 device->is_external_raid_device =
2091 pqi_is_external_raid_addr(scsi3addr);
Don Brace3d46a592018-12-07 16:30:05 -06002092 }
Kevin Barnett6c223762016-06-27 16:41:00 -05002093
2094 /* Gather information about the device. */
2095 rc = pqi_get_device_info(ctrl_info, device);
2096 if (rc == -ENOMEM) {
2097 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2098 out_of_memory_msg);
2099 goto out;
2100 }
2101 if (rc) {
Kevin Barnett6de783f2017-05-03 18:55:19 -05002102 if (device->is_physical_device)
2103 dev_warn(&ctrl_info->pci_dev->dev,
2104 "obtaining device info failed, skipping physical device %016llx\n",
2105 get_unaligned_be64(
2106 &phys_lun_ext_entry->wwid));
2107 else
2108 dev_warn(&ctrl_info->pci_dev->dev,
2109 "obtaining device info failed, skipping logical device %08x%08x\n",
2110 *((u32 *)&device->scsi3addr),
2111 *((u32 *)&device->scsi3addr[4]));
Kevin Barnett6c223762016-06-27 16:41:00 -05002112 rc = 0;
2113 continue;
2114 }
2115
2116 if (!pqi_is_supported_device(device))
2117 continue;
2118
2119 pqi_assign_bus_target_lun(device);
2120
Kevin Barnett6c223762016-06-27 16:41:00 -05002121 if (device->is_physical_device) {
2122 device->wwid = phys_lun_ext_entry->wwid;
2123 if ((phys_lun_ext_entry->device_flags &
2124 REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
Don Brace3d46a592018-12-07 16:30:05 -06002125 phys_lun_ext_entry->aio_handle) {
Kevin Barnett6c223762016-06-27 16:41:00 -05002126 device->aio_enabled = true;
Don Brace3d46a592018-12-07 16:30:05 -06002127 device->aio_handle =
2128 phys_lun_ext_entry->aio_handle;
2129 }
2130 if (device->devtype == TYPE_DISK ||
2131 device->devtype == TYPE_ZBC) {
2132 pqi_get_physical_disk_info(ctrl_info,
2133 device, id_phys);
2134 }
Kevin Barnett6c223762016-06-27 16:41:00 -05002135 } else {
2136 memcpy(device->volume_id, log_lun_ext_entry->volume_id,
2137 sizeof(device->volume_id));
2138 }
2139
Don Brace3d46a592018-12-07 16:30:05 -06002140 if (pqi_is_device_with_sas_address(device))
2141 device->sas_address = get_unaligned_be64(&device->wwid);
Kevin Barnett6c223762016-06-27 16:41:00 -05002142
2143 new_device_list[num_valid_devices++] = device;
2144 }
2145
2146 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
2147
2148out:
2149 list_for_each_entry_safe(device, next, &new_device_list_head,
2150 new_device_list_entry) {
2151 if (device->keep_device)
2152 continue;
2153 list_del(&device->new_device_list_entry);
2154 pqi_free_device(device);
2155 }
2156
2157 kfree(new_device_list);
2158 kfree(physdev_list);
2159 kfree(logdev_list);
2160 kfree(id_phys);
2161
2162 return rc;
2163}
2164
2165static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2166{
2167 unsigned long flags;
2168 struct pqi_scsi_dev *device;
Kevin Barnett6c223762016-06-27 16:41:00 -05002169
Kevin Barnetta37ef742017-05-03 18:52:22 -05002170 while (1) {
2171 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
Kevin Barnett6c223762016-06-27 16:41:00 -05002172
Kevin Barnetta37ef742017-05-03 18:52:22 -05002173 device = list_first_entry_or_null(&ctrl_info->scsi_device_list,
2174 struct pqi_scsi_dev, scsi_device_list_entry);
2175 if (device)
2176 list_del(&device->scsi_device_list_entry);
2177
2178 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
2179 flags);
2180
2181 if (!device)
2182 break;
2183
Don Brace3d46a592018-12-07 16:30:05 -06002184 if (pqi_is_device_added(device))
Kevin Barnett6c223762016-06-27 16:41:00 -05002185 pqi_remove_device(ctrl_info, device);
Kevin Barnett6c223762016-06-27 16:41:00 -05002186 pqi_free_device(device);
2187 }
Kevin Barnett6c223762016-06-27 16:41:00 -05002188}
2189
2190static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2191{
2192 int rc;
2193
2194 if (pqi_ctrl_offline(ctrl_info))
2195 return -ENXIO;
2196
2197 mutex_lock(&ctrl_info->scan_mutex);
2198
2199 rc = pqi_update_scsi_devices(ctrl_info);
2200 if (rc)
Kevin Barnett5f310422017-05-03 18:54:55 -05002201 pqi_schedule_rescan_worker_delayed(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05002202
2203 mutex_unlock(&ctrl_info->scan_mutex);
2204
2205 return rc;
2206}
2207
2208static void pqi_scan_start(struct Scsi_Host *shost)
2209{
Mahesh Rajashekhara4fd22c12018-12-18 17:39:07 -06002210 struct pqi_ctrl_info *ctrl_info;
2211
2212 ctrl_info = shost_to_hba(shost);
2213 if (pqi_ctrl_in_ofa(ctrl_info))
2214 return;
2215
2216 pqi_scan_scsi_devices(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05002217}
2218
2219/* Returns TRUE if scan is finished. */
2220
2221static int pqi_scan_finished(struct Scsi_Host *shost,
2222 unsigned long elapsed_time)
2223{
2224 struct pqi_ctrl_info *ctrl_info;
2225
2226 ctrl_info = shost_priv(shost);
2227
2228 return !mutex_is_locked(&ctrl_info->scan_mutex);
2229}
2230
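/*
 * The lock/unlock pairs in the helpers below are used purely as barriers:
 * taking and immediately releasing the mutex blocks the caller until any
 * scan, LUN reset, or OFA operation currently holding that mutex finishes.
 */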
Kevin Barnett061ef062017-05-03 18:53:05 -05002231static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info)
2232{
2233 mutex_lock(&ctrl_info->scan_mutex);
2234 mutex_unlock(&ctrl_info->scan_mutex);
2235}
2236
2237static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info)
2238{
2239 mutex_lock(&ctrl_info->lun_reset_mutex);
2240 mutex_unlock(&ctrl_info->lun_reset_mutex);
2241}
2242
Mahesh Rajashekhara4fd22c12018-12-18 17:39:07 -06002243static void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
2244{
2245 mutex_lock(&ctrl_info->ofa_mutex);
2246 mutex_unlock(&ctrl_info->ofa_mutex);
2247}
2248
Kevin Barnett6c223762016-06-27 16:41:00 -05002249static inline void pqi_set_encryption_info(
2250 struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
2251 u64 first_block)
2252{
2253 u32 volume_blk_size;
2254
2255 /*
2256 * Set the encryption tweak values based on logical block address.
2257 * If the block size is 512, the tweak value is equal to the LBA.
2258 * For other block sizes, tweak value is (LBA * block size) / 512.
2259 */
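	/*
	 * For example (illustrative values): on a volume with a 4096-byte
	 * block size, an I/O starting at LBA 100 yields a tweak of
	 * (100 * 4096) / 512 = 800, so encrypt_tweak_lower ends up as 800
	 * and encrypt_tweak_upper as 0.
	 */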
2260 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
2261 if (volume_blk_size != 512)
2262 first_block = (first_block * volume_blk_size) / 512;
2263
2264 encryption_info->data_encryption_key_index =
2265 get_unaligned_le16(&raid_map->data_encryption_key_index);
2266 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
2267 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
2268}
2269
2270/*
Kevin Barnett588a63fe2017-05-03 18:55:25 -05002271 * Attempt to perform RAID bypass mapping for a logical volume I/O.
Kevin Barnett6c223762016-06-27 16:41:00 -05002272 */
2273
2274#define PQI_RAID_BYPASS_INELIGIBLE 1
2275
2276static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
2277 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
2278 struct pqi_queue_group *queue_group)
2279{
2280 struct raid_map *raid_map;
2281 bool is_write = false;
2282 u32 map_index;
2283 u64 first_block;
2284 u64 last_block;
2285 u32 block_cnt;
2286 u32 blocks_per_row;
2287 u64 first_row;
2288 u64 last_row;
2289 u32 first_row_offset;
2290 u32 last_row_offset;
2291 u32 first_column;
2292 u32 last_column;
2293 u64 r0_first_row;
2294 u64 r0_last_row;
2295 u32 r5or6_blocks_per_row;
2296 u64 r5or6_first_row;
2297 u64 r5or6_last_row;
2298 u32 r5or6_first_row_offset;
2299 u32 r5or6_last_row_offset;
2300 u32 r5or6_first_column;
2301 u32 r5or6_last_column;
2302 u16 data_disks_per_row;
2303 u32 total_disks_per_row;
2304 u16 layout_map_count;
2305 u32 stripesize;
2306 u16 strip_size;
2307 u32 first_group;
2308 u32 last_group;
2309 u32 current_group;
2310 u32 map_row;
2311 u32 aio_handle;
2312 u64 disk_block;
2313 u32 disk_block_cnt;
2314 u8 cdb[16];
2315 u8 cdb_length;
2316 int offload_to_mirror;
2317 struct pqi_encryption_info *encryption_info_ptr;
2318 struct pqi_encryption_info encryption_info;
2319#if BITS_PER_LONG == 32
2320 u64 tmpdiv;
2321#endif
2322
2323 /* Check for valid opcode, get LBA and block count. */
2324 switch (scmd->cmnd[0]) {
2325 case WRITE_6:
2326 is_write = true;
2327 /* fall through */
2328 case READ_6:
kevin Barnette018ef52016-09-16 15:01:51 -05002329 first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
2330 (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
Kevin Barnett6c223762016-06-27 16:41:00 -05002331 block_cnt = (u32)scmd->cmnd[4];
2332 if (block_cnt == 0)
2333 block_cnt = 256;
2334 break;
2335 case WRITE_10:
2336 is_write = true;
2337 /* fall through */
2338 case READ_10:
2339 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2340 block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
2341 break;
2342 case WRITE_12:
2343 is_write = true;
2344 /* fall through */
2345 case READ_12:
2346 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2347 block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
2348 break;
2349 case WRITE_16:
2350 is_write = true;
2351 /* fall through */
2352 case READ_16:
2353 first_block = get_unaligned_be64(&scmd->cmnd[2]);
2354 block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
2355 break;
2356 default:
2357 /* Process via normal I/O path. */
2358 return PQI_RAID_BYPASS_INELIGIBLE;
2359 }
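	/*
	 * For example (illustrative CDB): the switch above decodes a
	 * READ(10) of 28 00 00 00 10 00 00 00 08 00 to
	 * first_block = 0x1000 and block_cnt = 8.
	 */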
2360
2361 /* Check for write to non-RAID-0. */
2362 if (is_write && device->raid_level != SA_RAID_0)
2363 return PQI_RAID_BYPASS_INELIGIBLE;
2364
2365 if (unlikely(block_cnt == 0))
2366 return PQI_RAID_BYPASS_INELIGIBLE;
2367
2368 last_block = first_block + block_cnt - 1;
2369 raid_map = device->raid_map;
2370
2371 /* Check for invalid block or wraparound. */
2372 if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2373 last_block < first_block)
2374 return PQI_RAID_BYPASS_INELIGIBLE;
2375
2376 data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
2377 strip_size = get_unaligned_le16(&raid_map->strip_size);
2378 layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2379
2380 /* Calculate stripe information for the request. */
2381 blocks_per_row = data_disks_per_row * strip_size;
2382#if BITS_PER_LONG == 32
2383 tmpdiv = first_block;
2384 do_div(tmpdiv, blocks_per_row);
2385 first_row = tmpdiv;
2386 tmpdiv = last_block;
2387 do_div(tmpdiv, blocks_per_row);
2388 last_row = tmpdiv;
2389 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2390 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2391 tmpdiv = first_row_offset;
2392 do_div(tmpdiv, strip_size);
2393 first_column = tmpdiv;
2394 tmpdiv = last_row_offset;
2395 do_div(tmpdiv, strip_size);
2396 last_column = tmpdiv;
2397#else
2398 first_row = first_block / blocks_per_row;
2399 last_row = last_block / blocks_per_row;
2400 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2401 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2402 first_column = first_row_offset / strip_size;
2403 last_column = last_row_offset / strip_size;
2404#endif
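	/*
	 * Worked example (assumed geometry, not taken from any particular
	 * raid_map): with data_disks_per_row = 4 and strip_size = 128,
	 * blocks_per_row = 512.  A request at first_block = 1000 for
	 * 8 blocks gives first_row = last_row = 1, first_row_offset = 488,
	 * last_row_offset = 495, and first_column = last_column = 3, so it
	 * passes the single row/column check below.
	 */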
2405
2406 /* If the request doesn't map to a single row and column, let the controller handle it. */
2407 if (first_row != last_row || first_column != last_column)
2408 return PQI_RAID_BYPASS_INELIGIBLE;
2409
2410 /* Proceeding with driver mapping. */
2411 total_disks_per_row = data_disks_per_row +
2412 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2413 map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2414 get_unaligned_le16(&raid_map->row_cnt);
2415 map_index = (map_row * total_disks_per_row) + first_column;
2416
2417 /* RAID 1 */
2418 if (device->raid_level == SA_RAID_1) {
2419 if (device->offload_to_mirror)
2420 map_index += data_disks_per_row;
2421 device->offload_to_mirror = !device->offload_to_mirror;
2422 } else if (device->raid_level == SA_RAID_ADM) {
2423 /* RAID ADM */
2424 /*
2425 * Handles N-way mirrors (R1-ADM) and R10 with # of drives
2426 * divisible by 3.
2427 */
2428 offload_to_mirror = device->offload_to_mirror;
2429 if (offload_to_mirror == 0) {
2430 /* Use a physical disk in the first mirrored group. */
2431 map_index %= data_disks_per_row;
2432 } else {
2433 do {
2434 /*
2435 * Determine mirror group that map_index
2436 * indicates.
2437 */
2438 current_group = map_index / data_disks_per_row;
2439
2440 if (offload_to_mirror != current_group) {
2441 if (current_group <
2442 layout_map_count - 1) {
2443 /*
2444 * Select raid index from
2445 * next group.
2446 */
2447 map_index += data_disks_per_row;
2448 current_group++;
2449 } else {
2450 /*
2451 * Select raid index from first
2452 * group.
2453 */
2454 map_index %= data_disks_per_row;
2455 current_group = 0;
2456 }
2457 }
2458 } while (offload_to_mirror != current_group);
2459 }
2460
2461 /* Set mirror group to use next time. */
2462 offload_to_mirror =
2463 (offload_to_mirror >= layout_map_count - 1) ?
2464 0 : offload_to_mirror + 1;
2465 WARN_ON(offload_to_mirror >= layout_map_count);
2466 device->offload_to_mirror = offload_to_mirror;
2467 /*
2468 * Avoid direct use of device->offload_to_mirror within this
2469 * function since multiple threads might simultaneously
2470 * increment it beyond the range of device->layout_map_count - 1.
2471 */
2472 } else if ((device->raid_level == SA_RAID_5 ||
2473 device->raid_level == SA_RAID_6) && layout_map_count > 1) {
2474 /* RAID 50/60 */
2475 /* Verify first and last block are in same RAID group */
2476 r5or6_blocks_per_row = strip_size * data_disks_per_row;
2477 stripesize = r5or6_blocks_per_row * layout_map_count;
2478#if BITS_PER_LONG == 32
2479 tmpdiv = first_block;
2480 first_group = do_div(tmpdiv, stripesize);
2481 tmpdiv = first_group;
2482 do_div(tmpdiv, r5or6_blocks_per_row);
2483 first_group = tmpdiv;
2484 tmpdiv = last_block;
2485 last_group = do_div(tmpdiv, stripesize);
2486 tmpdiv = last_group;
2487 do_div(tmpdiv, r5or6_blocks_per_row);
2488 last_group = tmpdiv;
2489#else
2490 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
2491 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
2492#endif
2493 if (first_group != last_group)
2494 return PQI_RAID_BYPASS_INELIGIBLE;
2495
2496 /* Verify request is in a single row of RAID 5/6 */
2497#if BITS_PER_LONG == 32
2498 tmpdiv = first_block;
2499 do_div(tmpdiv, stripesize);
2500 first_row = r5or6_first_row = r0_first_row = tmpdiv;
2501 tmpdiv = last_block;
2502 do_div(tmpdiv, stripesize);
2503 r5or6_last_row = r0_last_row = tmpdiv;
2504#else
2505 first_row = r5or6_first_row = r0_first_row =
2506 first_block / stripesize;
2507 r5or6_last_row = r0_last_row = last_block / stripesize;
2508#endif
2509 if (r5or6_first_row != r5or6_last_row)
2510 return PQI_RAID_BYPASS_INELIGIBLE;
2511
2512 /* Verify request is in a single column */
2513#if BITS_PER_LONG == 32
2514 tmpdiv = first_block;
2515 first_row_offset = do_div(tmpdiv, stripesize);
2516 tmpdiv = first_row_offset;
2517 first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
2518 r5or6_first_row_offset = first_row_offset;
2519 tmpdiv = last_block;
2520 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
2521 tmpdiv = r5or6_last_row_offset;
2522 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
2523 tmpdiv = r5or6_first_row_offset;
2524 do_div(tmpdiv, strip_size);
2525 first_column = r5or6_first_column = tmpdiv;
2526 tmpdiv = r5or6_last_row_offset;
2527 do_div(tmpdiv, strip_size);
2528 r5or6_last_column = tmpdiv;
2529#else
2530 first_row_offset = r5or6_first_row_offset =
2531 (u32)((first_block % stripesize) %
2532 r5or6_blocks_per_row);
2533
2534 r5or6_last_row_offset =
2535 (u32)((last_block % stripesize) %
2536 r5or6_blocks_per_row);
2537
2538 first_column = r5or6_first_row_offset / strip_size;
2539 r5or6_first_column = first_column;
2540 r5or6_last_column = r5or6_last_row_offset / strip_size;
2541#endif
2542 if (r5or6_first_column != r5or6_last_column)
2543 return PQI_RAID_BYPASS_INELIGIBLE;
2544
2545 /* Request is eligible */
2546 map_row =
2547 ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2548 get_unaligned_le16(&raid_map->row_cnt);
2549
2550 map_index = (first_group *
2551 (get_unaligned_le16(&raid_map->row_cnt) *
2552 total_disks_per_row)) +
2553 (map_row * total_disks_per_row) + first_column;
2554 }
2555
Kevin Barnett6c223762016-06-27 16:41:00 -05002556 aio_handle = raid_map->disk_data[map_index].aio_handle;
2557 disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2558 first_row * strip_size +
2559 (first_row_offset - first_column * strip_size);
2560 disk_block_cnt = block_cnt;
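	/*
	 * Continuing the RAID 0 case of the illustrative geometry above
	 * (before any phys_blk_shift adjustment): first_row * strip_size
	 * = 128 and first_row_offset - first_column * strip_size =
	 * 488 - 384 = 104, so disk_block = disk_starting_blk + 232.
	 */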
2561
2562 /* Handle differing logical/physical block sizes. */
2563 if (raid_map->phys_blk_shift) {
2564 disk_block <<= raid_map->phys_blk_shift;
2565 disk_block_cnt <<= raid_map->phys_blk_shift;
2566 }
2567
2568 if (unlikely(disk_block_cnt > 0xffff))
2569 return PQI_RAID_BYPASS_INELIGIBLE;
2570
2571 /* Build the new CDB for the physical disk I/O. */
2572 if (disk_block > 0xffffffff) {
2573 cdb[0] = is_write ? WRITE_16 : READ_16;
2574 cdb[1] = 0;
2575 put_unaligned_be64(disk_block, &cdb[2]);
2576 put_unaligned_be32(disk_block_cnt, &cdb[10]);
2577 cdb[14] = 0;
2578 cdb[15] = 0;
2579 cdb_length = 16;
2580 } else {
2581 cdb[0] = is_write ? WRITE_10 : READ_10;
2582 cdb[1] = 0;
2583 put_unaligned_be32((u32)disk_block, &cdb[2]);
2584 cdb[6] = 0;
2585 put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
2586 cdb[9] = 0;
2587 cdb_length = 10;
2588 }
2589
2590 if (get_unaligned_le16(&raid_map->flags) &
2591 RAID_MAP_ENCRYPTION_ENABLED) {
2592 pqi_set_encryption_info(&encryption_info, raid_map,
2593 first_block);
2594 encryption_info_ptr = &encryption_info;
2595 } else {
2596 encryption_info_ptr = NULL;
2597 }
2598
2599 return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
Kevin Barnett376fb882017-05-03 18:54:43 -05002600 cdb, cdb_length, queue_group, encryption_info_ptr, true);
Kevin Barnett6c223762016-06-27 16:41:00 -05002601}
2602
2603#define PQI_STATUS_IDLE 0x0
2604
2605#define PQI_CREATE_ADMIN_QUEUE_PAIR 1
2606#define PQI_DELETE_ADMIN_QUEUE_PAIR 2
2607
2608#define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
2609#define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
2610#define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
2611#define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
2612#define PQI_DEVICE_STATE_ERROR 0x4
2613
2614#define PQI_MODE_READY_TIMEOUT_SECS 30
2615#define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
2616
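/*
 * Polls three readiness indicators in sequence - the PQI signature, a
 * function/status code of IDLE, and a device status of "all registers
 * ready" - all against a single deadline of PQI_MODE_READY_TIMEOUT_SECS
 * computed up front.
 */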
2617static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
2618{
2619 struct pqi_device_registers __iomem *pqi_registers;
2620 unsigned long timeout;
2621 u64 signature;
2622 u8 status;
2623
2624 pqi_registers = ctrl_info->pqi_registers;
Mahesh Rajashekhara4fd22c12018-12-18 17:39:07 -06002625 timeout = (PQI_MODE_READY_TIMEOUT_SECS * PQI_HZ) + jiffies;
Kevin Barnett6c223762016-06-27 16:41:00 -05002626
2627 while (1) {
2628 signature = readq(&pqi_registers->signature);
2629 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
2630 sizeof(signature)) == 0)
2631 break;
2632 if (time_after(jiffies, timeout)) {
2633 dev_err(&ctrl_info->pci_dev->dev,
2634 "timed out waiting for PQI signature\n");
2635 return -ETIMEDOUT;
2636 }
2637 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2638 }
2639
2640 while (1) {
2641 status = readb(&pqi_registers->function_and_status_code);
2642 if (status == PQI_STATUS_IDLE)
2643 break;
2644 if (time_after(jiffies, timeout)) {
2645 dev_err(&ctrl_info->pci_dev->dev,
2646 "timed out waiting for PQI IDLE\n");
2647 return -ETIMEDOUT;
2648 }
2649 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2650 }
2651
2652 while (1) {
2653 if (readl(&pqi_registers->device_status) ==
2654 PQI_DEVICE_STATE_ALL_REGISTERS_READY)
2655 break;
2656 if (time_after(jiffies, timeout)) {
2657 dev_err(&ctrl_info->pci_dev->dev,
2658 "timed out waiting for PQI all registers ready\n");
2659 return -ETIMEDOUT;
2660 }
2661 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2662 }
2663
2664 return 0;
2665}
2666
2667static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
2668{
2669 struct pqi_scsi_dev *device;
2670
2671 device = io_request->scmd->device->hostdata;
Kevin Barnett588a63fe2017-05-03 18:55:25 -05002672 device->raid_bypass_enabled = false;
Kevin Barnett376fb882017-05-03 18:54:43 -05002673 device->aio_enabled = false;
Kevin Barnett6c223762016-06-27 16:41:00 -05002674}
2675
Kevin Barnettd87d5472017-05-03 18:54:00 -05002676static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
Kevin Barnett6c223762016-06-27 16:41:00 -05002677{
2678 struct pqi_ctrl_info *ctrl_info;
Kevin Barnette58081a2016-08-31 14:54:29 -05002679 struct pqi_scsi_dev *device;
Kevin Barnett6c223762016-06-27 16:41:00 -05002680
Kevin Barnett03b288cf2017-05-03 18:54:49 -05002681 device = sdev->hostdata;
2682 if (device->device_offline)
2683 return;
2684
2685 device->device_offline = true;
Kevin Barnett03b288cf2017-05-03 18:54:49 -05002686 ctrl_info = shost_to_hba(sdev->host);
2687 pqi_schedule_rescan_worker(ctrl_info);
Dave Carrolla9a68102018-12-07 16:29:37 -06002688 dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n",
Kevin Barnett03b288cf2017-05-03 18:54:49 -05002689 path, ctrl_info->scsi_host->host_no, device->bus,
2690 device->target, device->lun);
Kevin Barnett6c223762016-06-27 16:41:00 -05002691}
2692
2693static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
2694{
2695 u8 scsi_status;
2696 u8 host_byte;
2697 struct scsi_cmnd *scmd;
2698 struct pqi_raid_error_info *error_info;
2699 size_t sense_data_length;
2700 int residual_count;
2701 int xfer_count;
2702 struct scsi_sense_hdr sshdr;
2703
2704 scmd = io_request->scmd;
2705 if (!scmd)
2706 return;
2707
2708 error_info = io_request->error_info;
2709 scsi_status = error_info->status;
2710 host_byte = DID_OK;
2711
Kevin Barnettf5b63202017-05-03 18:55:07 -05002712 switch (error_info->data_out_result) {
2713 case PQI_DATA_IN_OUT_GOOD:
2714 break;
2715 case PQI_DATA_IN_OUT_UNDERFLOW:
Kevin Barnett6c223762016-06-27 16:41:00 -05002716 xfer_count =
2717 get_unaligned_le32(&error_info->data_out_transferred);
2718 residual_count = scsi_bufflen(scmd) - xfer_count;
2719 scsi_set_resid(scmd, residual_count);
2720 if (xfer_count < scmd->underflow)
2721 host_byte = DID_SOFT_ERROR;
Kevin Barnettf5b63202017-05-03 18:55:07 -05002722 break;
2723 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
2724 case PQI_DATA_IN_OUT_ABORTED:
2725 host_byte = DID_ABORT;
2726 break;
2727 case PQI_DATA_IN_OUT_TIMEOUT:
2728 host_byte = DID_TIME_OUT;
2729 break;
2730 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
2731 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
2732 case PQI_DATA_IN_OUT_BUFFER_ERROR:
2733 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
2734 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
2735 case PQI_DATA_IN_OUT_ERROR:
2736 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
2737 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
2738 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
2739 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
2740 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
2741 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
2742 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
2743 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
2744 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
2745 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
2746 default:
2747 host_byte = DID_ERROR;
2748 break;
Kevin Barnett6c223762016-06-27 16:41:00 -05002749 }
2750
2751 sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
2752 if (sense_data_length == 0)
2753 sense_data_length =
2754 get_unaligned_le16(&error_info->response_data_length);
2755 if (sense_data_length) {
2756 if (sense_data_length > sizeof(error_info->data))
2757 sense_data_length = sizeof(error_info->data);
2758
2759 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
2760 scsi_normalize_sense(error_info->data,
2761 sense_data_length, &sshdr) &&
2762 sshdr.sense_key == HARDWARE_ERROR &&
2763 sshdr.asc == 0x3e &&
2764 sshdr.ascq == 0x1) {
Kevin Barnettd87d5472017-05-03 18:54:00 -05002765 pqi_take_device_offline(scmd->device, "RAID");
Kevin Barnett6c223762016-06-27 16:41:00 -05002766 host_byte = DID_NO_CONNECT;
2767 }
2768
2769 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2770 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2771 memcpy(scmd->sense_buffer, error_info->data,
2772 sense_data_length);
2773 }
2774
2775 scmd->result = scsi_status;
2776 set_host_byte(scmd, host_byte);
2777}
2778
2779static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
2780{
2781 u8 scsi_status;
2782 u8 host_byte;
2783 struct scsi_cmnd *scmd;
2784 struct pqi_aio_error_info *error_info;
2785 size_t sense_data_length;
2786 int residual_count;
2787 int xfer_count;
2788 bool device_offline;
2789
2790 scmd = io_request->scmd;
2791 error_info = io_request->error_info;
2792 host_byte = DID_OK;
2793 sense_data_length = 0;
2794 device_offline = false;
2795
2796 switch (error_info->service_response) {
2797 case PQI_AIO_SERV_RESPONSE_COMPLETE:
2798 scsi_status = error_info->status;
2799 break;
2800 case PQI_AIO_SERV_RESPONSE_FAILURE:
2801 switch (error_info->status) {
2802 case PQI_AIO_STATUS_IO_ABORTED:
2803 scsi_status = SAM_STAT_TASK_ABORTED;
2804 break;
2805 case PQI_AIO_STATUS_UNDERRUN:
2806 scsi_status = SAM_STAT_GOOD;
2807 residual_count = get_unaligned_le32(
2808 &error_info->residual_count);
2809 scsi_set_resid(scmd, residual_count);
2810 xfer_count = scsi_bufflen(scmd) - residual_count;
2811 if (xfer_count < scmd->underflow)
2812 host_byte = DID_SOFT_ERROR;
2813 break;
2814 case PQI_AIO_STATUS_OVERRUN:
2815 scsi_status = SAM_STAT_GOOD;
2816 break;
2817 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
2818 pqi_aio_path_disabled(io_request);
2819 scsi_status = SAM_STAT_GOOD;
2820 io_request->status = -EAGAIN;
2821 break;
2822 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
2823 case PQI_AIO_STATUS_INVALID_DEVICE:
Kevin Barnett376fb882017-05-03 18:54:43 -05002824 if (!io_request->raid_bypass) {
2825 device_offline = true;
2826 pqi_take_device_offline(scmd->device, "AIO");
2827 host_byte = DID_NO_CONNECT;
2828 }
Kevin Barnett6c223762016-06-27 16:41:00 -05002829 scsi_status = SAM_STAT_CHECK_CONDITION;
2830 break;
2831 case PQI_AIO_STATUS_IO_ERROR:
2832 default:
2833 scsi_status = SAM_STAT_CHECK_CONDITION;
2834 break;
2835 }
2836 break;
2837 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
2838 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
2839 scsi_status = SAM_STAT_GOOD;
2840 break;
2841 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
2842 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
2843 default:
2844 scsi_status = SAM_STAT_CHECK_CONDITION;
2845 break;
2846 }
2847
2848 if (error_info->data_present) {
2849 sense_data_length =
2850 get_unaligned_le16(&error_info->data_length);
2851 if (sense_data_length) {
2852 if (sense_data_length > sizeof(error_info->data))
2853 sense_data_length = sizeof(error_info->data);
2854 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2855 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2856 memcpy(scmd->sense_buffer, error_info->data,
2857 sense_data_length);
2858 }
2859 }
2860
2861 if (device_offline && sense_data_length == 0)
2862 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
2863 0x3e, 0x1);
2864
2865 scmd->result = scsi_status;
2866 set_host_byte(scmd, host_byte);
2867}
2868
2869static void pqi_process_io_error(unsigned int iu_type,
2870 struct pqi_io_request *io_request)
2871{
2872 switch (iu_type) {
2873 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2874 pqi_process_raid_io_error(io_request);
2875 break;
2876 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2877 pqi_process_aio_io_error(io_request);
2878 break;
2879 }
2880}
2881
2882static int pqi_interpret_task_management_response(
2883 struct pqi_task_management_response *response)
2884{
2885 int rc;
2886
2887 switch (response->response_code) {
Kevin Barnettb17f0482016-08-31 14:54:17 -05002888 case SOP_TMF_COMPLETE:
2889 case SOP_TMF_FUNCTION_SUCCEEDED:
Kevin Barnett6c223762016-06-27 16:41:00 -05002890 rc = 0;
2891 break;
Mahesh Rajashekhara34063842018-12-07 16:28:16 -06002892 case SOP_TMF_REJECTED:
2893 rc = -EAGAIN;
2894 break;
Kevin Barnett6c223762016-06-27 16:41:00 -05002895 default:
2896 rc = -EIO;
2897 break;
2898 }
2899
2900 return rc;
2901}
2902
2903static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
2904 struct pqi_queue_group *queue_group)
2905{
2906 unsigned int num_responses;
2907 pqi_index_t oq_pi;
2908 pqi_index_t oq_ci;
2909 struct pqi_io_request *io_request;
2910 struct pqi_io_response *response;
2911 u16 request_id;
2912
2913 num_responses = 0;
2914 oq_ci = queue_group->oq_ci_copy;
2915
2916 while (1) {
Kevin Barnettdac12fb2018-06-18 13:23:00 -05002917 oq_pi = readl(queue_group->oq_pi);
Kevin Barnett6c223762016-06-27 16:41:00 -05002918 if (oq_pi == oq_ci)
2919 break;
2920
2921 num_responses++;
2922 response = queue_group->oq_element_array +
2923 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
2924
2925 request_id = get_unaligned_le16(&response->request_id);
2926 WARN_ON(request_id >= ctrl_info->max_io_slots);
2927
2928 io_request = &ctrl_info->io_request_pool[request_id];
2929 WARN_ON(atomic_read(&io_request->refcount) == 0);
2930
2931 switch (response->header.iu_type) {
2932 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
2933 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
Kevin Barnett2ba55c92018-12-07 16:29:51 -06002934 if (io_request->scmd)
2935 io_request->scmd->result = 0;
2936 /* fall through */
Kevin Barnett6c223762016-06-27 16:41:00 -05002937 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
2938 break;
Kevin Barnettb212c252018-12-07 16:28:10 -06002939 case PQI_RESPONSE_IU_VENDOR_GENERAL:
2940 io_request->status =
2941 get_unaligned_le16(
2942 &((struct pqi_vendor_general_response *)
2943 response)->status);
2944 break;
Kevin Barnett6c223762016-06-27 16:41:00 -05002945 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
2946 io_request->status =
2947 pqi_interpret_task_management_response(
2948 (void *)response);
2949 break;
2950 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
2951 pqi_aio_path_disabled(io_request);
2952 io_request->status = -EAGAIN;
2953 break;
2954 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2955 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2956 io_request->error_info = ctrl_info->error_buffer +
2957 (get_unaligned_le16(&response->error_index) *
2958 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
2959 pqi_process_io_error(response->header.iu_type,
2960 io_request);
2961 break;
2962 default:
2963 dev_err(&ctrl_info->pci_dev->dev,
2964 "unexpected IU type: 0x%x\n",
2965 response->header.iu_type);
Kevin Barnett6c223762016-06-27 16:41:00 -05002966 break;
2967 }
2968
2969 io_request->io_complete_callback(io_request,
2970 io_request->context);
2971
2972 /*
2973 * Note that the I/O request structure CANNOT BE TOUCHED after
2974 * returning from the I/O completion callback!
2975 */
2976
2977 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
2978 }
2979
2980 if (num_responses) {
2981 queue_group->oq_ci_copy = oq_ci;
2982 writel(oq_ci, queue_group->oq_ci);
2983 }
2984
2985 return num_responses;
2986}
2987
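/*
 * For example (illustrative values): with 16 elements per queue, pi = 5
 * and ci = 2 means three elements are in use, so 16 - 3 - 1 = 12 are
 * free; one slot is always left unused so that pi == ci unambiguously
 * means the queue is empty.
 */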
2988static inline unsigned int pqi_num_elements_free(unsigned int pi,
Kevin Barnettdf7a1fc2016-08-31 14:54:59 -05002989 unsigned int ci, unsigned int elements_in_queue)
Kevin Barnett6c223762016-06-27 16:41:00 -05002990{
2991 unsigned int num_elements_used;
2992
2993 if (pi >= ci)
2994 num_elements_used = pi - ci;
2995 else
2996 num_elements_used = elements_in_queue - ci + pi;
2997
2998 return elements_in_queue - num_elements_used - 1;
2999}
3000
Kevin Barnett98f87662017-05-03 18:53:11 -05003001static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
Kevin Barnett6c223762016-06-27 16:41:00 -05003002 struct pqi_event_acknowledge_request *iu, size_t iu_length)
3003{
3004 pqi_index_t iq_pi;
3005 pqi_index_t iq_ci;
3006 unsigned long flags;
3007 void *next_element;
Kevin Barnett6c223762016-06-27 16:41:00 -05003008 struct pqi_queue_group *queue_group;
3009
3010 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
3011 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
3012
Kevin Barnett6c223762016-06-27 16:41:00 -05003013 while (1) {
3014 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
3015
3016 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003017 iq_ci = readl(queue_group->iq_ci[RAID_PATH]);
Kevin Barnett6c223762016-06-27 16:41:00 -05003018
3019 if (pqi_num_elements_free(iq_pi, iq_ci,
3020 ctrl_info->num_elements_per_iq))
3021 break;
3022
3023 spin_unlock_irqrestore(
3024 &queue_group->submit_lock[RAID_PATH], flags);
3025
Kevin Barnett98f87662017-05-03 18:53:11 -05003026 if (pqi_ctrl_offline(ctrl_info))
Kevin Barnett6c223762016-06-27 16:41:00 -05003027 return;
Kevin Barnett6c223762016-06-27 16:41:00 -05003028 }
3029
3030 next_element = queue_group->iq_element_array[RAID_PATH] +
3031 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3032
3033 memcpy(next_element, iu, iu_length);
3034
3035 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
Kevin Barnett6c223762016-06-27 16:41:00 -05003036 queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
3037
3038 /*
3039 * This write notifies the controller that an IU is available to be
3040 * processed.
3041 */
3042 writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
3043
3044 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
Kevin Barnett6c223762016-06-27 16:41:00 -05003045}
3046
3047static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
3048 struct pqi_event *event)
3049{
3050 struct pqi_event_acknowledge_request request;
3051
3052 memset(&request, 0, sizeof(request));
3053
3054 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
3055 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
3056 &request.header.iu_length);
3057 request.event_type = event->event_type;
3058 request.event_id = event->event_id;
3059 request.additional_event_id = event->additional_event_id;
3060
Kevin Barnett98f87662017-05-03 18:53:11 -05003061 pqi_send_event_ack(ctrl_info, &request, sizeof(request));
Kevin Barnett6c223762016-06-27 16:41:00 -05003062}
3063
Mahesh Rajashekhara4fd22c12018-12-18 17:39:07 -06003064#define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS 30
3065#define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS 1
3066
3067static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
3068 struct pqi_ctrl_info *ctrl_info)
3069{
3070 unsigned long timeout;
3071 u8 status;
3072
3073 timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * PQI_HZ) + jiffies;
3074
3075 while (1) {
3076 status = pqi_read_soft_reset_status(ctrl_info);
3077 if (status & PQI_SOFT_RESET_INITIATE)
3078 return RESET_INITIATE_DRIVER;
3079
3080 if (status & PQI_SOFT_RESET_ABORT)
3081 return RESET_ABORT;
3082
3083 if (time_after(jiffies, timeout)) {
3084 dev_err(&ctrl_info->pci_dev->dev,
3085 "timed out waiting for soft reset status\n");
3086 return RESET_TIMEDOUT;
3087 }
3088
3089 if (!sis_is_firmware_running(ctrl_info))
3090 return RESET_NORESPONSE;
3091
3092 ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
3093 }
3094}
3095
3096static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info,
3097 enum pqi_soft_reset_status reset_status)
3098{
3099 int rc;
3100
3101 switch (reset_status) {
3102 case RESET_INITIATE_DRIVER:
3103 /* fall through */
3104 case RESET_TIMEDOUT:
3105 dev_info(&ctrl_info->pci_dev->dev,
3106 "resetting controller %u\n", ctrl_info->ctrl_id);
3107 sis_soft_reset(ctrl_info);
3108 /* fall through */
3109 case RESET_INITIATE_FIRMWARE:
3110 rc = pqi_ofa_ctrl_restart(ctrl_info);
3111 pqi_ofa_free_host_buffer(ctrl_info);
3112 dev_info(&ctrl_info->pci_dev->dev,
3113 "Online Firmware Activation for controller %u: %s\n",
3114 ctrl_info->ctrl_id, rc == 0 ? "SUCCESS" : "FAILED");
3115 break;
3116 case RESET_ABORT:
3117 pqi_ofa_ctrl_unquiesce(ctrl_info);
3118 dev_info(&ctrl_info->pci_dev->dev,
3119 "Online Firmware Activation for controller %u: %s\n",
3120 ctrl_info->ctrl_id, "ABORTED");
3121 break;
3122 case RESET_NORESPONSE:
3123 pqi_ofa_free_host_buffer(ctrl_info);
3124 pqi_take_ctrl_offline(ctrl_info);
3125 break;
3126 }
3127}
3128
3129static void pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
3130 struct pqi_event *event)
3131{
3132 u16 event_id;
3133 enum pqi_soft_reset_status status;
3134
3135 event_id = get_unaligned_le16(&event->event_id);
3136
3137 mutex_lock(&ctrl_info->ofa_mutex);
3138
3139 if (event_id == PQI_EVENT_OFA_QUIESCE) {
3140 dev_info(&ctrl_info->pci_dev->dev,
3141 "Received Online Firmware Activation quiesce event for controller %u\n",
3142 ctrl_info->ctrl_id);
3143 pqi_ofa_ctrl_quiesce(ctrl_info);
3144 pqi_acknowledge_event(ctrl_info, event);
3145 if (ctrl_info->soft_reset_handshake_supported) {
3146 status = pqi_poll_for_soft_reset_status(ctrl_info);
3147 pqi_process_soft_reset(ctrl_info, status);
3148 } else {
3149 pqi_process_soft_reset(ctrl_info,
3150 RESET_INITIATE_FIRMWARE);
3151 }
3152
3153 } else if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
3154 pqi_acknowledge_event(ctrl_info, event);
3155 pqi_ofa_setup_host_buffer(ctrl_info,
3156 le32_to_cpu(event->ofa_bytes_requested));
3157 pqi_ofa_host_memory_update(ctrl_info);
3158 } else if (event_id == PQI_EVENT_OFA_CANCELLED) {
3159 pqi_ofa_free_host_buffer(ctrl_info);
3160 pqi_acknowledge_event(ctrl_info, event);
3161 dev_info(&ctrl_info->pci_dev->dev,
3162 "Online Firmware Activation(%u) cancel reason: %u\n",
3163 ctrl_info->ctrl_id, event->ofa_cancel_reason);
3164 }
3165
3166 mutex_unlock(&ctrl_info->ofa_mutex);
3167}
3168
Kevin Barnett6c223762016-06-27 16:41:00 -05003169static void pqi_event_worker(struct work_struct *work)
3170{
3171 unsigned int i;
3172 struct pqi_ctrl_info *ctrl_info;
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05003173 struct pqi_event *event;
Kevin Barnett6c223762016-06-27 16:41:00 -05003174
3175 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
3176
Kevin Barnett7561a7e2017-05-03 18:52:58 -05003177 pqi_ctrl_busy(ctrl_info);
3178 pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT);
Kevin Barnett5f310422017-05-03 18:54:55 -05003179 if (pqi_ctrl_offline(ctrl_info))
3180 goto out;
3181
3182 pqi_schedule_rescan_worker_delayed(ctrl_info);
Kevin Barnett7561a7e2017-05-03 18:52:58 -05003183
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05003184 event = ctrl_info->events;
Kevin Barnett6c223762016-06-27 16:41:00 -05003185 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05003186 if (event->pending) {
3187 event->pending = false;
Mahesh Rajashekhara4fd22c12018-12-18 17:39:07 -06003188 if (event->event_type == PQI_EVENT_TYPE_OFA) {
3189 pqi_ctrl_unbusy(ctrl_info);
3190 pqi_ofa_process_event(ctrl_info, event);
3191 return;
3192 }
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05003193 pqi_acknowledge_event(ctrl_info, event);
Kevin Barnett6c223762016-06-27 16:41:00 -05003194 }
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05003195 event++;
Kevin Barnett6c223762016-06-27 16:41:00 -05003196 }
3197
Kevin Barnett5f310422017-05-03 18:54:55 -05003198out:
Kevin Barnett7561a7e2017-05-03 18:52:58 -05003199 pqi_ctrl_unbusy(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05003200}
3201
Mahesh Rajashekhara4fd22c12018-12-18 17:39:07 -06003202#define PQI_HEARTBEAT_TIMER_INTERVAL (10 * PQI_HZ)
Kevin Barnett6c223762016-06-27 16:41:00 -05003203
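/*
 * Runs every PQI_HEARTBEAT_TIMER_INTERVAL.  The controller is taken
 * offline only when neither the interrupt count nor the firmware
 * heartbeat counter has advanced since the previous run; otherwise the
 * timer is simply re-armed.
 */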
Kees Cook74a0f572017-10-11 16:27:10 -07003204static void pqi_heartbeat_timer_handler(struct timer_list *t)
Kevin Barnett6c223762016-06-27 16:41:00 -05003205{
3206 int num_interrupts;
Kevin Barnett98f87662017-05-03 18:53:11 -05003207 u32 heartbeat_count;
Kees Cook74a0f572017-10-11 16:27:10 -07003208 struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t,
3209 heartbeat_timer);
Kevin Barnett6c223762016-06-27 16:41:00 -05003210
Kevin Barnett98f87662017-05-03 18:53:11 -05003211 pqi_check_ctrl_health(ctrl_info);
3212 if (pqi_ctrl_offline(ctrl_info))
Kevin Barnett061ef062017-05-03 18:53:05 -05003213 return;
3214
Kevin Barnett6c223762016-06-27 16:41:00 -05003215 num_interrupts = atomic_read(&ctrl_info->num_interrupts);
Kevin Barnett98f87662017-05-03 18:53:11 -05003216 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05003217
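	/*
	 * Only consult the firmware heartbeat counter when no interrupts
	 * have arrived since the last timer tick; if that counter has also
	 * stalled, the controller is considered dead and is taken offline.
	 */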
3218 if (num_interrupts == ctrl_info->previous_num_interrupts) {
Kevin Barnett98f87662017-05-03 18:53:11 -05003219 if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
3220 dev_err(&ctrl_info->pci_dev->dev,
3221 "no heartbeat detected - last heartbeat count: %u\n",
3222 heartbeat_count);
Kevin Barnett6c223762016-06-27 16:41:00 -05003223 pqi_take_ctrl_offline(ctrl_info);
3224 return;
3225 }
Kevin Barnett6c223762016-06-27 16:41:00 -05003226 } else {
Kevin Barnett98f87662017-05-03 18:53:11 -05003227 ctrl_info->previous_num_interrupts = num_interrupts;
Kevin Barnett6c223762016-06-27 16:41:00 -05003228 }
3229
Kevin Barnett98f87662017-05-03 18:53:11 -05003230 ctrl_info->previous_heartbeat_count = heartbeat_count;
Kevin Barnett6c223762016-06-27 16:41:00 -05003231 mod_timer(&ctrl_info->heartbeat_timer,
3232 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
3233}
3234
3235static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3236{
Kevin Barnett98f87662017-05-03 18:53:11 -05003237 if (!ctrl_info->heartbeat_counter)
3238 return;
3239
Kevin Barnett6c223762016-06-27 16:41:00 -05003240 ctrl_info->previous_num_interrupts =
3241 atomic_read(&ctrl_info->num_interrupts);
Kevin Barnett98f87662017-05-03 18:53:11 -05003242 ctrl_info->previous_heartbeat_count =
3243 pqi_read_heartbeat_counter(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05003244
Kevin Barnett6c223762016-06-27 16:41:00 -05003245 ctrl_info->heartbeat_timer.expires =
3246 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
Kevin Barnett061ef062017-05-03 18:53:05 -05003247 add_timer(&ctrl_info->heartbeat_timer);
Kevin Barnett6c223762016-06-27 16:41:00 -05003248}
3249
3250static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3251{
Kevin Barnett98f87662017-05-03 18:53:11 -05003252 del_timer_sync(&ctrl_info->heartbeat_timer);
Kevin Barnett6c223762016-06-27 16:41:00 -05003253}
3254
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05003255static inline int pqi_event_type_to_event_index(unsigned int event_type)
Kevin Barnett6c223762016-06-27 16:41:00 -05003256{
3257 int index;
3258
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05003259 for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
3260 if (event_type == pqi_supported_event_types[index])
3261 return index;
Kevin Barnett6c223762016-06-27 16:41:00 -05003262
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05003263 return -1;
3264}
3265
3266static inline bool pqi_is_supported_event(unsigned int event_type)
3267{
3268 return pqi_event_type_to_event_index(event_type) != -1;
Kevin Barnett6c223762016-06-27 16:41:00 -05003269}
3270
Mahesh Rajashekhara4fd22c12018-12-18 17:39:07 -06003271static void pqi_ofa_capture_event_payload(struct pqi_event *event,
3272 struct pqi_event_response *response)
3273{
3274 u16 event_id;
3275
3276 event_id = get_unaligned_le16(&event->event_id);
3277
3278 if (event->event_type == PQI_EVENT_TYPE_OFA) {
3279 if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
3280 event->ofa_bytes_requested =
3281 response->data.ofa_memory_allocation.bytes_requested;
3282 } else if (event_id == PQI_EVENT_OFA_CANCELLED) {
3283 event->ofa_cancel_reason =
3284 response->data.ofa_cancelled.reason;
3285 }
3286 }
3287}
3288
Kevin Barnett6c223762016-06-27 16:41:00 -05003289static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
3290{
3291 unsigned int num_events;
3292 pqi_index_t oq_pi;
3293 pqi_index_t oq_ci;
3294 struct pqi_event_queue *event_queue;
3295 struct pqi_event_response *response;
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05003296 struct pqi_event *event;
Kevin Barnett6c223762016-06-27 16:41:00 -05003297 int event_index;
3298
3299 event_queue = &ctrl_info->event_queue;
3300 num_events = 0;
Kevin Barnett6c223762016-06-27 16:41:00 -05003301 oq_ci = event_queue->oq_ci_copy;
3302
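	/*
	 * Drain the event OQ: consume elements between the cached consumer
	 * index and the firmware-owned producer index, latch any supported
	 * events into ctrl_info->events, then ring the CI doorbell and
	 * schedule the event worker.
	 */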
3303 while (1) {
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003304 oq_pi = readl(event_queue->oq_pi);
Kevin Barnett6c223762016-06-27 16:41:00 -05003305 if (oq_pi == oq_ci)
3306 break;
3307
3308 num_events++;
3309 response = event_queue->oq_element_array +
3310 (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
3311
3312 event_index =
3313 pqi_event_type_to_event_index(response->event_type);
3314
3315 if (event_index >= 0) {
3316 if (response->request_acknowlege) {
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05003317 event = &ctrl_info->events[event_index];
3318 event->pending = true;
3319 event->event_type = response->event_type;
3320 event->event_id = response->event_id;
3321 event->additional_event_id =
Kevin Barnett6c223762016-06-27 16:41:00 -05003322 response->additional_event_id;
Mahesh Rajashekhara4fd22c12018-12-18 17:39:07 -06003323 pqi_ofa_capture_event_payload(event, response);
Kevin Barnett6c223762016-06-27 16:41:00 -05003324 }
3325 }
3326
3327 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
3328 }
3329
3330 if (num_events) {
3331 event_queue->oq_ci_copy = oq_ci;
3332 writel(oq_ci, event_queue->oq_ci);
Kevin Barnett98f87662017-05-03 18:53:11 -05003333 schedule_work(&ctrl_info->event_work);
Kevin Barnett6c223762016-06-27 16:41:00 -05003334 }
3335
3336 return num_events;
3337}
3338
Kevin Barnett061ef062017-05-03 18:53:05 -05003339#define PQI_LEGACY_INTX_MASK 0x1
3340
3341static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info,
3342 bool enable_intx)
3343{
3344 u32 intx_mask;
3345 struct pqi_device_registers __iomem *pqi_registers;
3346 volatile void __iomem *register_addr;
3347
3348 pqi_registers = ctrl_info->pqi_registers;
3349
3350 if (enable_intx)
3351 register_addr = &pqi_registers->legacy_intx_mask_clear;
3352 else
3353 register_addr = &pqi_registers->legacy_intx_mask_set;
3354
3355 intx_mask = readl(register_addr);
3356 intx_mask |= PQI_LEGACY_INTX_MASK;
3357 writel(intx_mask, register_addr);
3358}
3359
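/*
 * Transition the controller between MSI-X, legacy INTx, and "no interrupt"
 * modes by masking/unmasking the legacy INTx register and selecting the
 * corresponding SIS interrupt mode.
 */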
3360static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
3361 enum pqi_irq_mode new_mode)
3362{
3363 switch (ctrl_info->irq_mode) {
3364 case IRQ_MODE_MSIX:
3365 switch (new_mode) {
3366 case IRQ_MODE_MSIX:
3367 break;
3368 case IRQ_MODE_INTX:
3369 pqi_configure_legacy_intx(ctrl_info, true);
Kevin Barnett061ef062017-05-03 18:53:05 -05003370 sis_enable_intx(ctrl_info);
3371 break;
3372 case IRQ_MODE_NONE:
Kevin Barnett061ef062017-05-03 18:53:05 -05003373 break;
3374 }
3375 break;
3376 case IRQ_MODE_INTX:
3377 switch (new_mode) {
3378 case IRQ_MODE_MSIX:
3379 pqi_configure_legacy_intx(ctrl_info, false);
Kevin Barnett061ef062017-05-03 18:53:05 -05003380 sis_enable_msix(ctrl_info);
3381 break;
3382 case IRQ_MODE_INTX:
3383 break;
3384 case IRQ_MODE_NONE:
3385 pqi_configure_legacy_intx(ctrl_info, false);
Kevin Barnett061ef062017-05-03 18:53:05 -05003386 break;
3387 }
3388 break;
3389 case IRQ_MODE_NONE:
3390 switch (new_mode) {
3391 case IRQ_MODE_MSIX:
3392 sis_enable_msix(ctrl_info);
3393 break;
3394 case IRQ_MODE_INTX:
3395 pqi_configure_legacy_intx(ctrl_info, true);
3396 sis_enable_intx(ctrl_info);
3397 break;
3398 case IRQ_MODE_NONE:
3399 break;
3400 }
3401 break;
3402 }
3403
3404 ctrl_info->irq_mode = new_mode;
3405}
3406
3407#define PQI_LEGACY_INTX_PENDING 0x1
3408
3409static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
3410{
3411 bool valid_irq;
3412 u32 intx_status;
3413
3414 switch (ctrl_info->irq_mode) {
3415 case IRQ_MODE_MSIX:
3416 valid_irq = true;
3417 break;
3418 case IRQ_MODE_INTX:
3419 intx_status =
3420 readl(&ctrl_info->pqi_registers->legacy_intx_status);
3421 if (intx_status & PQI_LEGACY_INTX_PENDING)
3422 valid_irq = true;
3423 else
3424 valid_irq = false;
3425 break;
3426 case IRQ_MODE_NONE:
3427 default:
3428 valid_irq = false;
3429 break;
3430 }
3431
3432 return valid_irq;
3433}
3434
Kevin Barnett6c223762016-06-27 16:41:00 -05003435static irqreturn_t pqi_irq_handler(int irq, void *data)
3436{
3437 struct pqi_ctrl_info *ctrl_info;
3438 struct pqi_queue_group *queue_group;
3439 unsigned int num_responses_handled;
3440
3441 queue_group = data;
3442 ctrl_info = queue_group->ctrl_info;
3443
Kevin Barnett061ef062017-05-03 18:53:05 -05003444 if (!pqi_is_valid_irq(ctrl_info))
Kevin Barnett6c223762016-06-27 16:41:00 -05003445 return IRQ_NONE;
3446
3447 num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
3448
3449 if (irq == ctrl_info->event_irq)
3450 num_responses_handled += pqi_process_event_intr(ctrl_info);
3451
3452 if (num_responses_handled)
3453 atomic_inc(&ctrl_info->num_interrupts);
3454
3455 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
3456 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
3457
3458 return IRQ_HANDLED;
3459}
3460
3461static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
3462{
Kevin Barnettd91d7822017-05-03 18:53:30 -05003463 struct pci_dev *pci_dev = ctrl_info->pci_dev;
Kevin Barnett6c223762016-06-27 16:41:00 -05003464 int i;
3465 int rc;
3466
Kevin Barnettd91d7822017-05-03 18:53:30 -05003467 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);
Kevin Barnett6c223762016-06-27 16:41:00 -05003468
3469 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
Kevin Barnettd91d7822017-05-03 18:53:30 -05003470 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
Christoph Hellwig52198222016-11-01 08:12:49 -06003471 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
Kevin Barnett6c223762016-06-27 16:41:00 -05003472 if (rc) {
Kevin Barnettd91d7822017-05-03 18:53:30 -05003473 dev_err(&pci_dev->dev,
Kevin Barnett6c223762016-06-27 16:41:00 -05003474 "irq %u init failed with error %d\n",
Kevin Barnettd91d7822017-05-03 18:53:30 -05003475 pci_irq_vector(pci_dev, i), rc);
Kevin Barnett6c223762016-06-27 16:41:00 -05003476 return rc;
3477 }
3478 ctrl_info->num_msix_vectors_initialized++;
3479 }
3480
3481 return 0;
3482}
3483
Kevin Barnett98bf0612017-05-03 18:52:28 -05003484static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
3485{
3486 int i;
3487
3488 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
3489 free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
3490 &ctrl_info->queue_groups[i]);
3491
3492 ctrl_info->num_msix_vectors_initialized = 0;
3493}
3494
Kevin Barnett6c223762016-06-27 16:41:00 -05003495static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3496{
Kevin Barnett98bf0612017-05-03 18:52:28 -05003497 int num_vectors_enabled;
Kevin Barnett6c223762016-06-27 16:41:00 -05003498
Kevin Barnett98bf0612017-05-03 18:52:28 -05003499 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
Christoph Hellwig52198222016-11-01 08:12:49 -06003500 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
3501 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
Kevin Barnett98bf0612017-05-03 18:52:28 -05003502 if (num_vectors_enabled < 0) {
Kevin Barnett6c223762016-06-27 16:41:00 -05003503 dev_err(&ctrl_info->pci_dev->dev,
Kevin Barnett98bf0612017-05-03 18:52:28 -05003504 "MSI-X init failed with error %d\n",
3505 num_vectors_enabled);
3506 return num_vectors_enabled;
Kevin Barnett6c223762016-06-27 16:41:00 -05003507 }
3508
Kevin Barnett98bf0612017-05-03 18:52:28 -05003509 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
Kevin Barnett061ef062017-05-03 18:53:05 -05003510 ctrl_info->irq_mode = IRQ_MODE_MSIX;
Kevin Barnett6c223762016-06-27 16:41:00 -05003511 return 0;
3512}
3513
Kevin Barnett98bf0612017-05-03 18:52:28 -05003514static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3515{
3516 if (ctrl_info->num_msix_vectors_enabled) {
3517 pci_free_irq_vectors(ctrl_info->pci_dev);
3518 ctrl_info->num_msix_vectors_enabled = 0;
3519 }
3520}
3521
Kevin Barnett6c223762016-06-27 16:41:00 -05003522static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
3523{
3524 unsigned int i;
3525 size_t alloc_length;
3526 size_t element_array_length_per_iq;
3527 size_t element_array_length_per_oq;
3528 void *element_array;
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003529 void __iomem *next_queue_index;
Kevin Barnett6c223762016-06-27 16:41:00 -05003530 void *aligned_pointer;
3531 unsigned int num_inbound_queues;
3532 unsigned int num_outbound_queues;
3533 unsigned int num_queue_indexes;
3534 struct pqi_queue_group *queue_group;
3535
3536 element_array_length_per_iq =
3537 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
3538 ctrl_info->num_elements_per_iq;
3539 element_array_length_per_oq =
3540 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
3541 ctrl_info->num_elements_per_oq;
3542 num_inbound_queues = ctrl_info->num_queue_groups * 2;
3543 num_outbound_queues = ctrl_info->num_queue_groups;
3544 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
3545
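	/*
	 * First pass: walk a NULL-based pointer through every element array
	 * and queue index, honoring the PQI alignment rules, purely to size
	 * the single DMA-coherent allocation made below.
	 */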
3546 aligned_pointer = NULL;
3547
3548 for (i = 0; i < num_inbound_queues; i++) {
3549 aligned_pointer = PTR_ALIGN(aligned_pointer,
3550 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3551 aligned_pointer += element_array_length_per_iq;
3552 }
3553
3554 for (i = 0; i < num_outbound_queues; i++) {
3555 aligned_pointer = PTR_ALIGN(aligned_pointer,
3556 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3557 aligned_pointer += element_array_length_per_oq;
3558 }
3559
3560 aligned_pointer = PTR_ALIGN(aligned_pointer,
3561 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3562 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3563 PQI_EVENT_OQ_ELEMENT_LENGTH;
3564
3565 for (i = 0; i < num_queue_indexes; i++) {
3566 aligned_pointer = PTR_ALIGN(aligned_pointer,
3567 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3568 aligned_pointer += sizeof(pqi_index_t);
3569 }
3570
3571 alloc_length = (size_t)aligned_pointer +
3572 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3573
Kevin Barnette1d213b2017-05-03 18:53:18 -05003574 alloc_length += PQI_EXTRA_SGL_MEMORY;
3575
Kevin Barnett6c223762016-06-27 16:41:00 -05003576 ctrl_info->queue_memory_base =
3577 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3578 alloc_length,
3579 &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);
3580
Kevin Barnettd87d5472017-05-03 18:54:00 -05003581 if (!ctrl_info->queue_memory_base)
Kevin Barnett6c223762016-06-27 16:41:00 -05003582 return -ENOMEM;
Kevin Barnett6c223762016-06-27 16:41:00 -05003583
3584 ctrl_info->queue_memory_length = alloc_length;
3585
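	/*
	 * Second pass: carve the RAID/AIO inbound element arrays, the
	 * outbound element arrays, the event queue, and the queue index
	 * words out of the allocation, mirroring the layout computed above.
	 */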
3586 element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
3587 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3588
3589 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3590 queue_group = &ctrl_info->queue_groups[i];
3591 queue_group->iq_element_array[RAID_PATH] = element_array;
3592 queue_group->iq_element_array_bus_addr[RAID_PATH] =
3593 ctrl_info->queue_memory_base_dma_handle +
3594 (element_array - ctrl_info->queue_memory_base);
3595 element_array += element_array_length_per_iq;
3596 element_array = PTR_ALIGN(element_array,
3597 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3598 queue_group->iq_element_array[AIO_PATH] = element_array;
3599 queue_group->iq_element_array_bus_addr[AIO_PATH] =
3600 ctrl_info->queue_memory_base_dma_handle +
3601 (element_array - ctrl_info->queue_memory_base);
3602 element_array += element_array_length_per_iq;
3603 element_array = PTR_ALIGN(element_array,
3604 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3605 }
3606
3607 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3608 queue_group = &ctrl_info->queue_groups[i];
3609 queue_group->oq_element_array = element_array;
3610 queue_group->oq_element_array_bus_addr =
3611 ctrl_info->queue_memory_base_dma_handle +
3612 (element_array - ctrl_info->queue_memory_base);
3613 element_array += element_array_length_per_oq;
3614 element_array = PTR_ALIGN(element_array,
3615 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3616 }
3617
3618 ctrl_info->event_queue.oq_element_array = element_array;
3619 ctrl_info->event_queue.oq_element_array_bus_addr =
3620 ctrl_info->queue_memory_base_dma_handle +
3621 (element_array - ctrl_info->queue_memory_base);
3622 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3623 PQI_EVENT_OQ_ELEMENT_LENGTH;
3624
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003625 next_queue_index = (void __iomem *)PTR_ALIGN(element_array,
Kevin Barnett6c223762016-06-27 16:41:00 -05003626 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3627
3628 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3629 queue_group = &ctrl_info->queue_groups[i];
3630 queue_group->iq_ci[RAID_PATH] = next_queue_index;
3631 queue_group->iq_ci_bus_addr[RAID_PATH] =
3632 ctrl_info->queue_memory_base_dma_handle +
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003633 (next_queue_index -
3634 (void __iomem *)ctrl_info->queue_memory_base);
Kevin Barnett6c223762016-06-27 16:41:00 -05003635 next_queue_index += sizeof(pqi_index_t);
3636 next_queue_index = PTR_ALIGN(next_queue_index,
3637 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3638 queue_group->iq_ci[AIO_PATH] = next_queue_index;
3639 queue_group->iq_ci_bus_addr[AIO_PATH] =
3640 ctrl_info->queue_memory_base_dma_handle +
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003641 (next_queue_index -
3642 (void __iomem *)ctrl_info->queue_memory_base);
Kevin Barnett6c223762016-06-27 16:41:00 -05003643 next_queue_index += sizeof(pqi_index_t);
3644 next_queue_index = PTR_ALIGN(next_queue_index,
3645 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3646 queue_group->oq_pi = next_queue_index;
3647 queue_group->oq_pi_bus_addr =
3648 ctrl_info->queue_memory_base_dma_handle +
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003649 (next_queue_index -
3650 (void __iomem *)ctrl_info->queue_memory_base);
Kevin Barnett6c223762016-06-27 16:41:00 -05003651 next_queue_index += sizeof(pqi_index_t);
3652 next_queue_index = PTR_ALIGN(next_queue_index,
3653 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3654 }
3655
3656 ctrl_info->event_queue.oq_pi = next_queue_index;
3657 ctrl_info->event_queue.oq_pi_bus_addr =
3658 ctrl_info->queue_memory_base_dma_handle +
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003659 (next_queue_index -
3660 (void __iomem *)ctrl_info->queue_memory_base);
Kevin Barnett6c223762016-06-27 16:41:00 -05003661
3662 return 0;
3663}
3664
3665static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
3666{
3667 unsigned int i;
3668 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3669 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3670
3671 /*
3672 * Initialize the backpointers to the controller structure in
3673 * each operational queue group structure.
3674 */
3675 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3676 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
3677
3678 /*
3679 * Assign IDs to all operational queues. Note that the IDs
3680 * assigned to operational IQs are independent of the IDs
3681 * assigned to operational OQs.
3682 */
3683 ctrl_info->event_queue.oq_id = next_oq_id++;
3684 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3685 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
3686 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
3687 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
3688 }
3689
3690 /*
3691 * Assign MSI-X table entry indexes to all queues. Note that the
3692 * interrupt for the event queue is shared with the first queue group.
3693 */
3694 ctrl_info->event_queue.int_msg_num = 0;
3695 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3696 ctrl_info->queue_groups[i].int_msg_num = i;
3697
3698 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3699 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
3700 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
3701 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
3702 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
3703 }
3704}
3705
3706static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
3707{
3708 size_t alloc_length;
3709 struct pqi_admin_queues_aligned *admin_queues_aligned;
3710 struct pqi_admin_queues *admin_queues;
3711
3712 alloc_length = sizeof(struct pqi_admin_queues_aligned) +
3713 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3714
3715 ctrl_info->admin_queue_memory_base =
3716 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3717 alloc_length,
3718 &ctrl_info->admin_queue_memory_base_dma_handle,
3719 GFP_KERNEL);
3720
3721 if (!ctrl_info->admin_queue_memory_base)
3722 return -ENOMEM;
3723
3724 ctrl_info->admin_queue_memory_length = alloc_length;
3725
3726 admin_queues = &ctrl_info->admin_queues;
3727 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
3728 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3729 admin_queues->iq_element_array =
3730 &admin_queues_aligned->iq_element_array;
3731 admin_queues->oq_element_array =
3732 &admin_queues_aligned->oq_element_array;
3733 admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003734 admin_queues->oq_pi =
3735 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;
Kevin Barnett6c223762016-06-27 16:41:00 -05003736
3737 admin_queues->iq_element_array_bus_addr =
3738 ctrl_info->admin_queue_memory_base_dma_handle +
3739 (admin_queues->iq_element_array -
3740 ctrl_info->admin_queue_memory_base);
3741 admin_queues->oq_element_array_bus_addr =
3742 ctrl_info->admin_queue_memory_base_dma_handle +
3743 (admin_queues->oq_element_array -
3744 ctrl_info->admin_queue_memory_base);
3745 admin_queues->iq_ci_bus_addr =
3746 ctrl_info->admin_queue_memory_base_dma_handle +
3747 ((void *)admin_queues->iq_ci -
3748 ctrl_info->admin_queue_memory_base);
3749 admin_queues->oq_pi_bus_addr =
3750 ctrl_info->admin_queue_memory_base_dma_handle +
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003751 ((void __iomem *)admin_queues->oq_pi -
3752 (void __iomem *)ctrl_info->admin_queue_memory_base);
Kevin Barnett6c223762016-06-27 16:41:00 -05003753
3754 return 0;
3755}
3756
Mahesh Rajashekhara4fd22c12018-12-18 17:39:07 -06003757#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES PQI_HZ
Kevin Barnett6c223762016-06-27 16:41:00 -05003758#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
3759
3760static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
3761{
3762 struct pqi_device_registers __iomem *pqi_registers;
3763 struct pqi_admin_queues *admin_queues;
3764 unsigned long timeout;
3765 u8 status;
3766 u32 reg;
3767
3768 pqi_registers = ctrl_info->pqi_registers;
3769 admin_queues = &ctrl_info->admin_queues;
3770
3771 writeq((u64)admin_queues->iq_element_array_bus_addr,
3772 &pqi_registers->admin_iq_element_array_addr);
3773 writeq((u64)admin_queues->oq_element_array_bus_addr,
3774 &pqi_registers->admin_oq_element_array_addr);
3775 writeq((u64)admin_queues->iq_ci_bus_addr,
3776 &pqi_registers->admin_iq_ci_addr);
3777 writeq((u64)admin_queues->oq_pi_bus_addr,
3778 &pqi_registers->admin_oq_pi_addr);
3779
3780 reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
3781 (PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 |
3782 (admin_queues->int_msg_num << 16);
3783 writel(reg, &pqi_registers->admin_iq_num_elements);
3784 writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
3785 &pqi_registers->function_and_status_code);
3786
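	/*
	 * Poll the function and status code register until the firmware
	 * reports idle, indicating that the admin queue pair has been
	 * created.
	 */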
3787 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
3788 while (1) {
3789 status = readb(&pqi_registers->function_and_status_code);
3790 if (status == PQI_STATUS_IDLE)
3791 break;
3792 if (time_after(jiffies, timeout))
3793 return -ETIMEDOUT;
3794 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
3795 }
3796
3797 /*
3798 * The offset registers are not initialized to the correct
3799 * offsets until *after* the create admin queue pair command
3800 * completes successfully.
3801 */
3802 admin_queues->iq_pi = ctrl_info->iomem_base +
3803 PQI_DEVICE_REGISTERS_OFFSET +
3804 readq(&pqi_registers->admin_iq_pi_offset);
3805 admin_queues->oq_ci = ctrl_info->iomem_base +
3806 PQI_DEVICE_REGISTERS_OFFSET +
3807 readq(&pqi_registers->admin_oq_ci_offset);
3808
3809 return 0;
3810}
3811
3812static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
3813 struct pqi_general_admin_request *request)
3814{
3815 struct pqi_admin_queues *admin_queues;
3816 void *next_element;
3817 pqi_index_t iq_pi;
3818
3819 admin_queues = &ctrl_info->admin_queues;
3820 iq_pi = admin_queues->iq_pi_copy;
3821
3822 next_element = admin_queues->iq_element_array +
3823 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
3824
3825 memcpy(next_element, request, sizeof(*request));
3826
3827 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
3828 admin_queues->iq_pi_copy = iq_pi;
3829
3830 /*
3831 * This write notifies the controller that an IU is available to be
3832 * processed.
3833 */
3834 writel(iq_pi, admin_queues->iq_pi);
3835}
3836
Kevin Barnett13bede62017-05-03 18:55:13 -05003837#define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60
3838
Kevin Barnett6c223762016-06-27 16:41:00 -05003839static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
3840 struct pqi_general_admin_response *response)
3841{
3842 struct pqi_admin_queues *admin_queues;
3843 pqi_index_t oq_pi;
3844 pqi_index_t oq_ci;
3845 unsigned long timeout;
3846
3847 admin_queues = &ctrl_info->admin_queues;
3848 oq_ci = admin_queues->oq_ci_copy;
3849
Mahesh Rajashekhara4fd22c12018-12-18 17:39:07 -06003850 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * PQI_HZ) + jiffies;
Kevin Barnett6c223762016-06-27 16:41:00 -05003851
3852 while (1) {
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003853 oq_pi = readl(admin_queues->oq_pi);
Kevin Barnett6c223762016-06-27 16:41:00 -05003854 if (oq_pi != oq_ci)
3855 break;
3856 if (time_after(jiffies, timeout)) {
3857 dev_err(&ctrl_info->pci_dev->dev,
3858 "timed out waiting for admin response\n");
3859 return -ETIMEDOUT;
3860 }
Kevin Barnett13bede62017-05-03 18:55:13 -05003861 if (!sis_is_firmware_running(ctrl_info))
3862 return -ENXIO;
Kevin Barnett6c223762016-06-27 16:41:00 -05003863 usleep_range(1000, 2000);
3864 }
3865
3866 memcpy(response, admin_queues->oq_element_array +
3867 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
3868
3869 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
3870 admin_queues->oq_ci_copy = oq_ci;
3871 writel(oq_ci, admin_queues->oq_ci);
3872
3873 return 0;
3874}
3875
3876static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
3877 struct pqi_queue_group *queue_group, enum pqi_io_path path,
3878 struct pqi_io_request *io_request)
3879{
3880 struct pqi_io_request *next;
3881 void *next_element;
3882 pqi_index_t iq_pi;
3883 pqi_index_t iq_ci;
3884 size_t iu_length;
3885 unsigned long flags;
3886 unsigned int num_elements_needed;
3887 unsigned int num_elements_to_end_of_queue;
3888 size_t copy_count;
3889 struct pqi_iu_header *request;
3890
3891 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
3892
Kevin Barnett376fb882017-05-03 18:54:43 -05003893 if (io_request) {
3894 io_request->queue_group = queue_group;
Kevin Barnett6c223762016-06-27 16:41:00 -05003895 list_add_tail(&io_request->request_list_entry,
3896 &queue_group->request_list[path]);
Kevin Barnett376fb882017-05-03 18:54:43 -05003897 }
Kevin Barnett6c223762016-06-27 16:41:00 -05003898
3899 iq_pi = queue_group->iq_pi_copy[path];
3900
3901 list_for_each_entry_safe(io_request, next,
3902 &queue_group->request_list[path], request_list_entry) {
3903
3904 request = io_request->iu;
3905
3906 iu_length = get_unaligned_le16(&request->iu_length) +
3907 PQI_REQUEST_HEADER_LENGTH;
3908 num_elements_needed =
3909 DIV_ROUND_UP(iu_length,
3910 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3911
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003912 iq_ci = readl(queue_group->iq_ci[path]);
Kevin Barnett6c223762016-06-27 16:41:00 -05003913
3914 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
3915 ctrl_info->num_elements_per_iq))
3916 break;
3917
3918 put_unaligned_le16(queue_group->oq_id,
3919 &request->response_queue_id);
3920
3921 next_element = queue_group->iq_element_array[path] +
3922 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3923
3924 num_elements_to_end_of_queue =
3925 ctrl_info->num_elements_per_iq - iq_pi;
3926
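		/*
		 * Copy the IU into the circular inbound queue; if it would
		 * run past the end of the element array, split the copy and
		 * wrap the remainder to the start of the queue.
		 */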
3927 if (num_elements_needed <= num_elements_to_end_of_queue) {
3928 memcpy(next_element, request, iu_length);
3929 } else {
3930 copy_count = num_elements_to_end_of_queue *
3931 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
3932 memcpy(next_element, request, copy_count);
3933 memcpy(queue_group->iq_element_array[path],
3934 (u8 *)request + copy_count,
3935 iu_length - copy_count);
3936 }
3937
3938 iq_pi = (iq_pi + num_elements_needed) %
3939 ctrl_info->num_elements_per_iq;
3940
3941 list_del(&io_request->request_list_entry);
3942 }
3943
3944 if (iq_pi != queue_group->iq_pi_copy[path]) {
3945 queue_group->iq_pi_copy[path] = iq_pi;
3946 /*
3947 * This write notifies the controller that one or more IUs are
3948 * available to be processed.
3949 */
3950 writel(iq_pi, queue_group->iq_pi[path]);
3951 }
3952
3953 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
3954}
3955
Kevin Barnett1f37e992017-05-03 18:53:24 -05003956#define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10
3957
3958static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
3959 struct completion *wait)
3960{
3961 int rc;
Kevin Barnett1f37e992017-05-03 18:53:24 -05003962
3963 while (1) {
3964 if (wait_for_completion_io_timeout(wait,
Mahesh Rajashekhara4fd22c12018-12-18 17:39:07 -06003965 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * PQI_HZ)) {
Kevin Barnett1f37e992017-05-03 18:53:24 -05003966 rc = 0;
3967 break;
3968 }
3969
3970 pqi_check_ctrl_health(ctrl_info);
3971 if (pqi_ctrl_offline(ctrl_info)) {
3972 rc = -ENXIO;
3973 break;
3974 }
Kevin Barnett1f37e992017-05-03 18:53:24 -05003975 }
3976
3977 return rc;
3978}
3979
Kevin Barnett6c223762016-06-27 16:41:00 -05003980static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
3981 void *context)
3982{
3983 struct completion *waiting = context;
3984
3985 complete(waiting);
3986}
3987
Kevin Barnett26b390a2018-06-18 13:22:48 -05003988static int pqi_process_raid_io_error_synchronous(struct pqi_raid_error_info
3989 *error_info)
3990{
3991 int rc = -EIO;
3992
3993 switch (error_info->data_out_result) {
3994 case PQI_DATA_IN_OUT_GOOD:
3995 if (error_info->status == SAM_STAT_GOOD)
3996 rc = 0;
3997 break;
3998 case PQI_DATA_IN_OUT_UNDERFLOW:
3999 if (error_info->status == SAM_STAT_GOOD ||
4000 error_info->status == SAM_STAT_CHECK_CONDITION)
4001 rc = 0;
4002 break;
4003 case PQI_DATA_IN_OUT_ABORTED:
4004 rc = PQI_CMD_STATUS_ABORTED;
4005 break;
4006 }
4007
4008 return rc;
4009}
4010
Kevin Barnett6c223762016-06-27 16:41:00 -05004011static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
4012 struct pqi_iu_header *request, unsigned int flags,
4013 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
4014{
Kevin Barnett957c5ab2018-06-18 13:22:42 -05004015 int rc = 0;
Kevin Barnett6c223762016-06-27 16:41:00 -05004016 struct pqi_io_request *io_request;
4017 unsigned long start_jiffies;
4018 unsigned long msecs_blocked;
4019 size_t iu_length;
Kevin Barnett957c5ab2018-06-18 13:22:42 -05004020 DECLARE_COMPLETION_ONSTACK(wait);
Kevin Barnett6c223762016-06-27 16:41:00 -05004021
4022 /*
4023 * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value
4024 * are mutually exclusive.
4025 */
4026
4027 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
4028 if (down_interruptible(&ctrl_info->sync_request_sem))
4029 return -ERESTARTSYS;
4030 } else {
4031 if (timeout_msecs == NO_TIMEOUT) {
4032 down(&ctrl_info->sync_request_sem);
4033 } else {
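			/*
			 * Time spent waiting for the sync-request semaphore
			 * counts against the caller's timeout, so deduct it
			 * before arming the completion wait below.
			 */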
4034 start_jiffies = jiffies;
4035 if (down_timeout(&ctrl_info->sync_request_sem,
4036 msecs_to_jiffies(timeout_msecs)))
4037 return -ETIMEDOUT;
4038 msecs_blocked =
4039 jiffies_to_msecs(jiffies - start_jiffies);
4040 if (msecs_blocked >= timeout_msecs)
4041 return -ETIMEDOUT;
4042 timeout_msecs -= msecs_blocked;
4043 }
4044 }
4045
Kevin Barnett7561a7e2017-05-03 18:52:58 -05004046 pqi_ctrl_busy(ctrl_info);
4047 timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs);
4048 if (timeout_msecs == 0) {
Kevin Barnett957c5ab2018-06-18 13:22:42 -05004049 pqi_ctrl_unbusy(ctrl_info);
Kevin Barnett7561a7e2017-05-03 18:52:58 -05004050 rc = -ETIMEDOUT;
4051 goto out;
4052 }
4053
Kevin Barnett376fb882017-05-03 18:54:43 -05004054 if (pqi_ctrl_offline(ctrl_info)) {
Kevin Barnett957c5ab2018-06-18 13:22:42 -05004055 pqi_ctrl_unbusy(ctrl_info);
Kevin Barnett376fb882017-05-03 18:54:43 -05004056 rc = -ENXIO;
4057 goto out;
4058 }
4059
Kevin Barnett6c223762016-06-27 16:41:00 -05004060 io_request = pqi_alloc_io_request(ctrl_info);
4061
4062 put_unaligned_le16(io_request->index,
4063 &(((struct pqi_raid_path_request *)request)->request_id));
4064
4065 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
4066 ((struct pqi_raid_path_request *)request)->error_index =
4067 ((struct pqi_raid_path_request *)request)->request_id;
4068
4069 iu_length = get_unaligned_le16(&request->iu_length) +
4070 PQI_REQUEST_HEADER_LENGTH;
4071 memcpy(io_request->iu, request, iu_length);
4072
Kevin Barnett957c5ab2018-06-18 13:22:42 -05004073 io_request->io_complete_callback = pqi_raid_synchronous_complete;
4074 io_request->context = &wait;
4075
4076 pqi_start_io(ctrl_info,
4077 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
4078 io_request);
4079
4080 pqi_ctrl_unbusy(ctrl_info);
4081
4082 if (timeout_msecs == NO_TIMEOUT) {
4083 pqi_wait_for_completion_io(ctrl_info, &wait);
4084 } else {
4085 if (!wait_for_completion_io_timeout(&wait,
4086 msecs_to_jiffies(timeout_msecs))) {
4087 dev_warn(&ctrl_info->pci_dev->dev,
4088 "command timed out\n");
4089 rc = -ETIMEDOUT;
4090 }
4091 }
Kevin Barnett6c223762016-06-27 16:41:00 -05004092
4093 if (error_info) {
4094 if (io_request->error_info)
4095 memcpy(error_info, io_request->error_info,
4096 sizeof(*error_info));
4097 else
4098 memset(error_info, 0, sizeof(*error_info));
4099 } else if (rc == 0 && io_request->error_info) {
Kevin Barnett26b390a2018-06-18 13:22:48 -05004100 rc = pqi_process_raid_io_error_synchronous(
4101 io_request->error_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05004102 }
4103
4104 pqi_free_io_request(io_request);
4105
Kevin Barnett7561a7e2017-05-03 18:52:58 -05004106out:
Kevin Barnett6c223762016-06-27 16:41:00 -05004107 up(&ctrl_info->sync_request_sem);
4108
4109 return rc;
4110}
4111
4112static int pqi_validate_admin_response(
4113 struct pqi_general_admin_response *response, u8 expected_function_code)
4114{
4115 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
4116 return -EINVAL;
4117
4118 if (get_unaligned_le16(&response->header.iu_length) !=
4119 PQI_GENERAL_ADMIN_IU_LENGTH)
4120 return -EINVAL;
4121
4122 if (response->function_code != expected_function_code)
4123 return -EINVAL;
4124
4125 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
4126 return -EINVAL;
4127
4128 return 0;
4129}
4130
4131static int pqi_submit_admin_request_synchronous(
4132 struct pqi_ctrl_info *ctrl_info,
4133 struct pqi_general_admin_request *request,
4134 struct pqi_general_admin_response *response)
4135{
4136 int rc;
4137
4138 pqi_submit_admin_request(ctrl_info, request);
4139
4140 rc = pqi_poll_for_admin_response(ctrl_info, response);
4141
4142 if (rc == 0)
4143 rc = pqi_validate_admin_response(response,
4144 request->function_code);
4145
4146 return rc;
4147}
4148
4149static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
4150{
4151 int rc;
4152 struct pqi_general_admin_request request;
4153 struct pqi_general_admin_response response;
4154 struct pqi_device_capability *capability;
4155 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
4156
4157 capability = kmalloc(sizeof(*capability), GFP_KERNEL);
4158 if (!capability)
4159 return -ENOMEM;
4160
4161 memset(&request, 0, sizeof(request));
4162
4163 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4164 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4165 &request.header.iu_length);
4166 request.function_code =
4167 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
4168 put_unaligned_le32(sizeof(*capability),
4169 &request.data.report_device_capability.buffer_length);
4170
4171 rc = pqi_map_single(ctrl_info->pci_dev,
4172 &request.data.report_device_capability.sg_descriptor,
4173 capability, sizeof(*capability),
Christoph Hellwig6917a9c2018-10-11 09:47:59 +02004174 DMA_FROM_DEVICE);
Kevin Barnett6c223762016-06-27 16:41:00 -05004175 if (rc)
4176 goto out;
4177
4178 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4179 &response);
4180
4181 pqi_pci_unmap(ctrl_info->pci_dev,
4182 &request.data.report_device_capability.sg_descriptor, 1,
Christoph Hellwig6917a9c2018-10-11 09:47:59 +02004183 DMA_FROM_DEVICE);
Kevin Barnett6c223762016-06-27 16:41:00 -05004184
4185 if (rc)
4186 goto out;
4187
4188 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
4189 rc = -EIO;
4190 goto out;
4191 }
4192
4193 ctrl_info->max_inbound_queues =
4194 get_unaligned_le16(&capability->max_inbound_queues);
4195 ctrl_info->max_elements_per_iq =
4196 get_unaligned_le16(&capability->max_elements_per_iq);
4197 ctrl_info->max_iq_element_length =
4198 get_unaligned_le16(&capability->max_iq_element_length)
4199 * 16;
4200 ctrl_info->max_outbound_queues =
4201 get_unaligned_le16(&capability->max_outbound_queues);
4202 ctrl_info->max_elements_per_oq =
4203 get_unaligned_le16(&capability->max_elements_per_oq);
4204 ctrl_info->max_oq_element_length =
4205 get_unaligned_le16(&capability->max_oq_element_length)
4206 * 16;
4207
4208 sop_iu_layer_descriptor =
4209 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
4210
4211 ctrl_info->max_inbound_iu_length_per_firmware =
4212 get_unaligned_le16(
4213 &sop_iu_layer_descriptor->max_inbound_iu_length);
4214 ctrl_info->inbound_spanning_supported =
4215 sop_iu_layer_descriptor->inbound_spanning_supported;
4216 ctrl_info->outbound_spanning_supported =
4217 sop_iu_layer_descriptor->outbound_spanning_supported;
4218
4219out:
4220 kfree(capability);
4221
4222 return rc;
4223}
4224
4225static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
4226{
4227 if (ctrl_info->max_iq_element_length <
4228 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4229 dev_err(&ctrl_info->pci_dev->dev,
4230 "max. inbound queue element length of %d is less than the required length of %d\n",
4231 ctrl_info->max_iq_element_length,
4232 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4233 return -EINVAL;
4234 }
4235
4236 if (ctrl_info->max_oq_element_length <
4237 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
4238 dev_err(&ctrl_info->pci_dev->dev,
4239 "max. outbound queue element length of %d is less than the required length of %d\n",
4240 ctrl_info->max_oq_element_length,
4241 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
4242 return -EINVAL;
4243 }
4244
4245 if (ctrl_info->max_inbound_iu_length_per_firmware <
4246 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4247 dev_err(&ctrl_info->pci_dev->dev,
4248 "max. inbound IU length of %u is less than the min. required length of %d\n",
4249 ctrl_info->max_inbound_iu_length_per_firmware,
4250 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4251 return -EINVAL;
4252 }
4253
Kevin Barnett77668f42016-08-31 14:54:23 -05004254 if (!ctrl_info->inbound_spanning_supported) {
4255 dev_err(&ctrl_info->pci_dev->dev,
4256 "the controller does not support inbound spanning\n");
4257 return -EINVAL;
4258 }
4259
4260 if (ctrl_info->outbound_spanning_supported) {
4261 dev_err(&ctrl_info->pci_dev->dev,
4262 "the controller supports outbound spanning but this driver does not\n");
4263 return -EINVAL;
4264 }
4265
Kevin Barnett6c223762016-06-27 16:41:00 -05004266 return 0;
4267}
4268
Kevin Barnett6c223762016-06-27 16:41:00 -05004269static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
4270{
4271 int rc;
4272 struct pqi_event_queue *event_queue;
4273 struct pqi_general_admin_request request;
4274 struct pqi_general_admin_response response;
4275
4276 event_queue = &ctrl_info->event_queue;
4277
4278 /*
4279 * Create the OQ (Outbound Queue - device to host queue) dedicated
4280 * to event notifications.
4281 */
4282 memset(&request, 0, sizeof(request));
4283 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4284 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4285 &request.header.iu_length);
4286 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4287 put_unaligned_le16(event_queue->oq_id,
4288 &request.data.create_operational_oq.queue_id);
4289 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
4290 &request.data.create_operational_oq.element_array_addr);
4291 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
4292 &request.data.create_operational_oq.pi_addr);
4293 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
4294 &request.data.create_operational_oq.num_elements);
4295 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
4296 &request.data.create_operational_oq.element_length);
4297 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4298 put_unaligned_le16(event_queue->int_msg_num,
4299 &request.data.create_operational_oq.int_msg_num);
4300
4301 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4302 &response);
4303 if (rc)
4304 return rc;
4305
4306 event_queue->oq_ci = ctrl_info->iomem_base +
4307 PQI_DEVICE_REGISTERS_OFFSET +
4308 get_unaligned_le64(
4309 &response.data.create_operational_oq.oq_ci_offset);
4310
4311 return 0;
4312}
4313
Kevin Barnett061ef062017-05-03 18:53:05 -05004314static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
4315 unsigned int group_number)
Kevin Barnett6c223762016-06-27 16:41:00 -05004316{
Kevin Barnett6c223762016-06-27 16:41:00 -05004317 int rc;
4318 struct pqi_queue_group *queue_group;
4319 struct pqi_general_admin_request request;
4320 struct pqi_general_admin_response response;
4321
Kevin Barnett061ef062017-05-03 18:53:05 -05004322 queue_group = &ctrl_info->queue_groups[group_number];
Kevin Barnett6c223762016-06-27 16:41:00 -05004323
4324 /*
4325 * Create IQ (Inbound Queue - host to device queue) for
4326 * RAID path.
4327 */
4328 memset(&request, 0, sizeof(request));
4329 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4330 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4331 &request.header.iu_length);
4332 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4333 put_unaligned_le16(queue_group->iq_id[RAID_PATH],
4334 &request.data.create_operational_iq.queue_id);
4335 put_unaligned_le64(
4336 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
4337 &request.data.create_operational_iq.element_array_addr);
4338 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
4339 &request.data.create_operational_iq.ci_addr);
4340 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4341 &request.data.create_operational_iq.num_elements);
4342 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4343 &request.data.create_operational_iq.element_length);
4344 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4345
4346 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4347 &response);
4348 if (rc) {
4349 dev_err(&ctrl_info->pci_dev->dev,
4350 "error creating inbound RAID queue\n");
4351 return rc;
4352 }
4353
4354 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
4355 PQI_DEVICE_REGISTERS_OFFSET +
4356 get_unaligned_le64(
4357 &response.data.create_operational_iq.iq_pi_offset);
4358
4359 /*
4360 * Create IQ (Inbound Queue - host to device queue) for
4361 * Advanced I/O (AIO) path.
4362 */
4363 memset(&request, 0, sizeof(request));
4364 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4365 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4366 &request.header.iu_length);
4367 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4368 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4369 &request.data.create_operational_iq.queue_id);
4370 put_unaligned_le64((u64)queue_group->
4371 iq_element_array_bus_addr[AIO_PATH],
4372 &request.data.create_operational_iq.element_array_addr);
4373 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
4374 &request.data.create_operational_iq.ci_addr);
4375 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4376 &request.data.create_operational_iq.num_elements);
4377 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4378 &request.data.create_operational_iq.element_length);
4379 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4380
4381 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4382 &response);
4383 if (rc) {
4384 dev_err(&ctrl_info->pci_dev->dev,
4385 "error creating inbound AIO queue\n");
Kevin Barnett339faa82018-03-21 13:32:31 -05004386 return rc;
Kevin Barnett6c223762016-06-27 16:41:00 -05004387 }
4388
4389 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
4390 PQI_DEVICE_REGISTERS_OFFSET +
4391 get_unaligned_le64(
4392 &response.data.create_operational_iq.iq_pi_offset);
4393
4394 /*
4395 * Designate the 2nd IQ as the AIO path. By default, all IQs are
4396 * assumed to be for RAID path I/O unless we change the queue's
4397 * property.
4398 */
4399 memset(&request, 0, sizeof(request));
4400 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4401 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4402 &request.header.iu_length);
4403 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
4404 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4405 &request.data.change_operational_iq_properties.queue_id);
4406 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
4407 &request.data.change_operational_iq_properties.vendor_specific);
4408
4409 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4410 &response);
4411 if (rc) {
4412 dev_err(&ctrl_info->pci_dev->dev,
4413 "error changing queue property\n");
Kevin Barnett339faa82018-03-21 13:32:31 -05004414 return rc;
Kevin Barnett6c223762016-06-27 16:41:00 -05004415 }
4416
4417 /*
4418 * Create OQ (Outbound Queue - device to host queue).
4419 */
4420 memset(&request, 0, sizeof(request));
4421 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4422 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4423 &request.header.iu_length);
4424 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4425 put_unaligned_le16(queue_group->oq_id,
4426 &request.data.create_operational_oq.queue_id);
4427 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
4428 &request.data.create_operational_oq.element_array_addr);
4429 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
4430 &request.data.create_operational_oq.pi_addr);
4431 put_unaligned_le16(ctrl_info->num_elements_per_oq,
4432 &request.data.create_operational_oq.num_elements);
4433 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
4434 &request.data.create_operational_oq.element_length);
4435 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4436 put_unaligned_le16(queue_group->int_msg_num,
4437 &request.data.create_operational_oq.int_msg_num);
4438
4439 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4440 &response);
4441 if (rc) {
4442 dev_err(&ctrl_info->pci_dev->dev,
4443 "error creating outbound queue\n");
Kevin Barnett339faa82018-03-21 13:32:31 -05004444 return rc;
Kevin Barnett6c223762016-06-27 16:41:00 -05004445 }
4446
4447 queue_group->oq_ci = ctrl_info->iomem_base +
4448 PQI_DEVICE_REGISTERS_OFFSET +
4449 get_unaligned_le64(
4450 &response.data.create_operational_oq.oq_ci_offset);
4451
Kevin Barnett6c223762016-06-27 16:41:00 -05004452 return 0;
Kevin Barnett6c223762016-06-27 16:41:00 -05004453}
4454
4455static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
4456{
4457 int rc;
4458 unsigned int i;
4459
4460 rc = pqi_create_event_queue(ctrl_info);
4461 if (rc) {
4462 dev_err(&ctrl_info->pci_dev->dev,
4463 "error creating event queue\n");
4464 return rc;
4465 }
4466
4467 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
Kevin Barnett061ef062017-05-03 18:53:05 -05004468 rc = pqi_create_queue_group(ctrl_info, i);
Kevin Barnett6c223762016-06-27 16:41:00 -05004469 if (rc) {
4470 dev_err(&ctrl_info->pci_dev->dev,
4471 "error creating queue group number %u/%u\n",
4472 i, ctrl_info->num_queue_groups);
4473 return rc;
4474 }
4475 }
4476
4477 return 0;
4478}
4479
4480#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
4481 (offsetof(struct pqi_event_config, descriptors) + \
4482 (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
4483
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05004484static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
4485 bool enable_events)
Kevin Barnett6c223762016-06-27 16:41:00 -05004486{
4487 int rc;
4488 unsigned int i;
4489 struct pqi_event_config *event_config;
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05004490 struct pqi_event_descriptor *event_descriptor;
Kevin Barnett6c223762016-06-27 16:41:00 -05004491 struct pqi_general_management_request request;
4492
4493 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4494 GFP_KERNEL);
4495 if (!event_config)
4496 return -ENOMEM;
4497
4498 memset(&request, 0, sizeof(request));
4499
4500 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
4501 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4502 data.report_event_configuration.sg_descriptors[1]) -
4503 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4504 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4505 &request.data.report_event_configuration.buffer_length);
4506
4507 rc = pqi_map_single(ctrl_info->pci_dev,
4508 request.data.report_event_configuration.sg_descriptors,
4509 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
Christoph Hellwig6917a9c2018-10-11 09:47:59 +02004510 DMA_FROM_DEVICE);
Kevin Barnett6c223762016-06-27 16:41:00 -05004511 if (rc)
4512 goto out;
4513
4514 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
4515 0, NULL, NO_TIMEOUT);
4516
4517 pqi_pci_unmap(ctrl_info->pci_dev,
4518 request.data.report_event_configuration.sg_descriptors, 1,
Christoph Hellwig6917a9c2018-10-11 09:47:59 +02004519 DMA_FROM_DEVICE);
Kevin Barnett6c223762016-06-27 16:41:00 -05004520
4521 if (rc)
4522 goto out;
4523
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05004524 for (i = 0; i < event_config->num_event_descriptors; i++) {
4525 event_descriptor = &event_config->descriptors[i];
4526 if (enable_events &&
4527 pqi_is_supported_event(event_descriptor->event_type))
4528 put_unaligned_le16(ctrl_info->event_queue.oq_id,
4529 &event_descriptor->oq_id);
4530 else
4531 put_unaligned_le16(0, &event_descriptor->oq_id);
4532 }
Kevin Barnett6c223762016-06-27 16:41:00 -05004533
4534 memset(&request, 0, sizeof(request));
4535
4536 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
4537 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4538 data.report_event_configuration.sg_descriptors[1]) -
4539 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4540 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4541 &request.data.report_event_configuration.buffer_length);
4542
4543 rc = pqi_map_single(ctrl_info->pci_dev,
4544 request.data.report_event_configuration.sg_descriptors,
4545 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
Christoph Hellwig6917a9c2018-10-11 09:47:59 +02004546 DMA_TO_DEVICE);
Kevin Barnett6c223762016-06-27 16:41:00 -05004547 if (rc)
4548 goto out;
4549
4550 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
4551 NULL, NO_TIMEOUT);
4552
4553 pqi_pci_unmap(ctrl_info->pci_dev,
4554 request.data.report_event_configuration.sg_descriptors, 1,
Christoph Hellwig6917a9c2018-10-11 09:47:59 +02004555 DMA_TO_DEVICE);
Kevin Barnett6c223762016-06-27 16:41:00 -05004556
4557out:
4558 kfree(event_config);
4559
4560 return rc;
4561}
4562
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05004563static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
4564{
4565 return pqi_configure_events(ctrl_info, true);
4566}
4567
4568static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info)
4569{
4570 return pqi_configure_events(ctrl_info, false);
4571}
4572
Kevin Barnett6c223762016-06-27 16:41:00 -05004573static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
4574{
4575 unsigned int i;
4576 struct device *dev;
4577 size_t sg_chain_buffer_length;
4578 struct pqi_io_request *io_request;
4579
4580 if (!ctrl_info->io_request_pool)
4581 return;
4582
4583 dev = &ctrl_info->pci_dev->dev;
4584 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4585 io_request = ctrl_info->io_request_pool;
4586
4587 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4588 kfree(io_request->iu);
4589 if (!io_request->sg_chain_buffer)
4590 break;
4591 dma_free_coherent(dev, sg_chain_buffer_length,
4592 io_request->sg_chain_buffer,
4593 io_request->sg_chain_buffer_dma_handle);
4594 io_request++;
4595 }
4596
4597 kfree(ctrl_info->io_request_pool);
4598 ctrl_info->io_request_pool = NULL;
4599}
4600
4601static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
4602{
4603 ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
4604 ctrl_info->error_buffer_length,
4605 &ctrl_info->error_buffer_dma_handle, GFP_KERNEL);
4606
4607 if (!ctrl_info->error_buffer)
4608 return -ENOMEM;
4609
4610 return 0;
4611}
4612
4613static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
4614{
4615 unsigned int i;
4616 void *sg_chain_buffer;
4617 size_t sg_chain_buffer_length;
4618 dma_addr_t sg_chain_buffer_dma_handle;
4619 struct device *dev;
4620 struct pqi_io_request *io_request;
4621
Kees Cook6396bb22018-06-12 14:03:40 -07004622 ctrl_info->io_request_pool =
4623 kcalloc(ctrl_info->max_io_slots,
4624 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
Kevin Barnett6c223762016-06-27 16:41:00 -05004625
4626 if (!ctrl_info->io_request_pool) {
4627 dev_err(&ctrl_info->pci_dev->dev,
4628 "failed to allocate I/O request pool\n");
4629 goto error;
4630 }
4631
4632 dev = &ctrl_info->pci_dev->dev;
4633 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4634 io_request = ctrl_info->io_request_pool;
4635
4636 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4637 io_request->iu =
4638 kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
4639
4640 if (!io_request->iu) {
4641 dev_err(&ctrl_info->pci_dev->dev,
4642 "failed to allocate IU buffers\n");
4643 goto error;
4644 }
4645
4646 sg_chain_buffer = dma_alloc_coherent(dev,
4647 sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
4648 GFP_KERNEL);
4649
4650 if (!sg_chain_buffer) {
4651 dev_err(&ctrl_info->pci_dev->dev,
4652 "failed to allocate PQI scatter-gather chain buffers\n");
4653 goto error;
4654 }
4655
4656 io_request->index = i;
4657 io_request->sg_chain_buffer = sg_chain_buffer;
4658 io_request->sg_chain_buffer_dma_handle =
4659 sg_chain_buffer_dma_handle;
4660 io_request++;
4661 }
4662
4663 return 0;
4664
4665error:
4666 pqi_free_all_io_requests(ctrl_info);
4667
4668 return -ENOMEM;
4669}
4670
4671/*
4672 * Calculate required resources that are sized based on max. outstanding
4673 * requests and max. transfer size.
4674 */
4675
4676static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
4677{
4678 u32 max_transfer_size;
4679 u32 max_sg_entries;
4680
4681 ctrl_info->scsi_ml_can_queue =
4682 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
4683 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
4684
4685 ctrl_info->error_buffer_length =
4686 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
4687
Kevin Barnettd727a772017-05-03 18:54:25 -05004688 if (reset_devices)
4689 max_transfer_size = min(ctrl_info->max_transfer_size,
4690 PQI_MAX_TRANSFER_SIZE_KDUMP);
4691 else
4692 max_transfer_size = min(ctrl_info->max_transfer_size,
4693 PQI_MAX_TRANSFER_SIZE);
Kevin Barnett6c223762016-06-27 16:41:00 -05004694
4695 max_sg_entries = max_transfer_size / PAGE_SIZE;
4696
4697 /* +1 to cover when the buffer is not page-aligned. */
4698 max_sg_entries++;
4699
4700 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
4701
4702 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
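	/*
	 * Worked example (assuming 4 KiB pages and a 1 MiB controller
	 * transfer limit): 1 MiB / 4 KiB = 256 SG entries, +1 for a
	 * misaligned buffer = 257; the usable transfer size then becomes
	 * 256 * 4 KiB = 1 MiB once the alignment entry is excluded.
	 */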
4703
4704 ctrl_info->sg_chain_buffer_length =
Kevin Barnette1d213b2017-05-03 18:53:18 -05004705 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
4706 PQI_EXTRA_SGL_MEMORY;
Kevin Barnett6c223762016-06-27 16:41:00 -05004707 ctrl_info->sg_tablesize = max_sg_entries;
4708 ctrl_info->max_sectors = max_transfer_size / 512;
4709}
4710
4711static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
4712{
Kevin Barnett6c223762016-06-27 16:41:00 -05004713 int num_queue_groups;
4714 u16 num_elements_per_iq;
4715 u16 num_elements_per_oq;
4716
Kevin Barnettd727a772017-05-03 18:54:25 -05004717 if (reset_devices) {
4718 num_queue_groups = 1;
4719 } else {
4720 int num_cpus;
4721 int max_queue_groups;
Kevin Barnett6c223762016-06-27 16:41:00 -05004722
Kevin Barnettd727a772017-05-03 18:54:25 -05004723 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
4724 ctrl_info->max_outbound_queues - 1);
4725 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
4726
4727 num_cpus = num_online_cpus();
4728 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
4729 num_queue_groups = min(num_queue_groups, max_queue_groups);
4730 }
Kevin Barnett6c223762016-06-27 16:41:00 -05004731
4732 ctrl_info->num_queue_groups = num_queue_groups;
Kevin Barnett061ef062017-05-03 18:53:05 -05004733 ctrl_info->max_hw_queue_index = num_queue_groups - 1;
Kevin Barnett6c223762016-06-27 16:41:00 -05004734
Kevin Barnett77668f42016-08-31 14:54:23 -05004735 /*
4736 * Make sure that the max. inbound IU length is an even multiple
4737 * of our inbound element length.
4738 */
4739 ctrl_info->max_inbound_iu_length =
4740 (ctrl_info->max_inbound_iu_length_per_firmware /
4741 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
4742 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
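	/*
	 * Example with hypothetical firmware values: a 128-byte IQ element
	 * and max_inbound_iu_length_per_firmware = 1040 round the maximum
	 * inbound IU length down to 1024 here.
	 */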
Kevin Barnett6c223762016-06-27 16:41:00 -05004743
4744 num_elements_per_iq =
4745 (ctrl_info->max_inbound_iu_length /
4746 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4747
4748	/* Add one because one element in each circular queue is left unusable (so full vs. empty can be told apart). */
4749 num_elements_per_iq++;
4750
4751 num_elements_per_iq = min(num_elements_per_iq,
4752 ctrl_info->max_elements_per_iq);
4753
4754 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
4755 num_elements_per_oq = min(num_elements_per_oq,
4756 ctrl_info->max_elements_per_oq);
4757
4758 ctrl_info->num_elements_per_iq = num_elements_per_iq;
4759 ctrl_info->num_elements_per_oq = num_elements_per_oq;
4760
4761 ctrl_info->max_sg_per_iu =
4762 ((ctrl_info->max_inbound_iu_length -
4763 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
4764 sizeof(struct pqi_sg_descriptor)) +
4765 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
4766}
4767
4768static inline void pqi_set_sg_descriptor(
4769 struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
4770{
4771 u64 address = (u64)sg_dma_address(sg);
4772 unsigned int length = sg_dma_len(sg);
4773
4774 put_unaligned_le64(address, &sg_descriptor->address);
4775 put_unaligned_le32(length, &sg_descriptor->length);
4776 put_unaligned_le32(0, &sg_descriptor->flags);
4777}
4778
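/*
 * Build the SG list embedded in a RAID path IU.  Up to
 * ctrl_info->max_sg_per_iu - 1 data descriptors fit in the IU itself; if the
 * command maps to more scatter-gather elements than that, one additional
 * embedded slot becomes a CISS_SG_CHAIN descriptor pointing at the request's
 * pre-allocated DMA chain buffer and the remaining descriptors continue
 * there.  The final descriptor is always flagged CISS_SG_LAST.
 * pqi_build_aio_sg_list() below uses the same scheme for AIO path IUs.
 */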
4779static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
4780 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
4781 struct pqi_io_request *io_request)
4782{
4783 int i;
4784 u16 iu_length;
4785 int sg_count;
4786 bool chained;
4787 unsigned int num_sg_in_iu;
4788 unsigned int max_sg_per_iu;
4789 struct scatterlist *sg;
4790 struct pqi_sg_descriptor *sg_descriptor;
4791
4792 sg_count = scsi_dma_map(scmd);
4793 if (sg_count < 0)
4794 return sg_count;
4795
4796 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
4797 PQI_REQUEST_HEADER_LENGTH;
4798
4799 if (sg_count == 0)
4800 goto out;
4801
4802 sg = scsi_sglist(scmd);
4803 sg_descriptor = request->sg_descriptors;
4804 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4805 chained = false;
4806 num_sg_in_iu = 0;
4807 i = 0;
4808
4809 while (1) {
4810 pqi_set_sg_descriptor(sg_descriptor, sg);
4811 if (!chained)
4812 num_sg_in_iu++;
4813 i++;
4814 if (i == sg_count)
4815 break;
4816 sg_descriptor++;
4817 if (i == max_sg_per_iu) {
4818 put_unaligned_le64(
4819 (u64)io_request->sg_chain_buffer_dma_handle,
4820 &sg_descriptor->address);
4821 put_unaligned_le32((sg_count - num_sg_in_iu)
4822 * sizeof(*sg_descriptor),
4823 &sg_descriptor->length);
4824 put_unaligned_le32(CISS_SG_CHAIN,
4825 &sg_descriptor->flags);
4826 chained = true;
4827 num_sg_in_iu++;
4828 sg_descriptor = io_request->sg_chain_buffer;
4829 }
4830 sg = sg_next(sg);
4831 }
4832
4833 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4834 request->partial = chained;
4835 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4836
4837out:
4838 put_unaligned_le16(iu_length, &request->header.iu_length);
4839
4840 return 0;
4841}
4842
4843static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
4844 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
4845 struct pqi_io_request *io_request)
4846{
4847 int i;
4848 u16 iu_length;
4849 int sg_count;
Kevin Barnetta60eec02016-08-31 14:54:11 -05004850 bool chained;
4851 unsigned int num_sg_in_iu;
4852 unsigned int max_sg_per_iu;
Kevin Barnett6c223762016-06-27 16:41:00 -05004853 struct scatterlist *sg;
4854 struct pqi_sg_descriptor *sg_descriptor;
4855
4856 sg_count = scsi_dma_map(scmd);
4857 if (sg_count < 0)
4858 return sg_count;
Kevin Barnetta60eec02016-08-31 14:54:11 -05004859
4860 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
4861 PQI_REQUEST_HEADER_LENGTH;
4862 num_sg_in_iu = 0;
4863
Kevin Barnett6c223762016-06-27 16:41:00 -05004864 if (sg_count == 0)
4865 goto out;
4866
Kevin Barnetta60eec02016-08-31 14:54:11 -05004867 sg = scsi_sglist(scmd);
4868 sg_descriptor = request->sg_descriptors;
4869 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4870 chained = false;
4871 i = 0;
Kevin Barnett6c223762016-06-27 16:41:00 -05004872
Kevin Barnetta60eec02016-08-31 14:54:11 -05004873 while (1) {
4874 pqi_set_sg_descriptor(sg_descriptor, sg);
4875 if (!chained)
4876 num_sg_in_iu++;
4877 i++;
4878 if (i == sg_count)
4879 break;
4880 sg_descriptor++;
4881 if (i == max_sg_per_iu) {
4882 put_unaligned_le64(
4883 (u64)io_request->sg_chain_buffer_dma_handle,
4884 &sg_descriptor->address);
4885 put_unaligned_le32((sg_count - num_sg_in_iu)
4886 * sizeof(*sg_descriptor),
4887 &sg_descriptor->length);
4888 put_unaligned_le32(CISS_SG_CHAIN,
4889 &sg_descriptor->flags);
4890 chained = true;
4891 num_sg_in_iu++;
4892 sg_descriptor = io_request->sg_chain_buffer;
Kevin Barnett6c223762016-06-27 16:41:00 -05004893 }
Kevin Barnetta60eec02016-08-31 14:54:11 -05004894 sg = sg_next(sg);
Kevin Barnett6c223762016-06-27 16:41:00 -05004895 }
4896
Kevin Barnetta60eec02016-08-31 14:54:11 -05004897 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4898 request->partial = chained;
Kevin Barnett6c223762016-06-27 16:41:00 -05004899 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
Kevin Barnetta60eec02016-08-31 14:54:11 -05004900
4901out:
Kevin Barnett6c223762016-06-27 16:41:00 -05004902 put_unaligned_le16(iu_length, &request->header.iu_length);
4903 request->num_sg_descriptors = num_sg_in_iu;
4904
4905 return 0;
4906}
4907
4908static void pqi_raid_io_complete(struct pqi_io_request *io_request,
4909 void *context)
4910{
4911 struct scsi_cmnd *scmd;
4912
4913 scmd = io_request->scmd;
4914 pqi_free_io_request(io_request);
4915 scsi_dma_unmap(scmd);
4916 pqi_scsi_done(scmd);
4917}
4918
Kevin Barnett376fb882017-05-03 18:54:43 -05004919static int pqi_raid_submit_scsi_cmd_with_io_request(
4920 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
Kevin Barnett6c223762016-06-27 16:41:00 -05004921 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4922 struct pqi_queue_group *queue_group)
4923{
4924 int rc;
4925 size_t cdb_length;
Kevin Barnett6c223762016-06-27 16:41:00 -05004926 struct pqi_raid_path_request *request;
4927
Kevin Barnett6c223762016-06-27 16:41:00 -05004928 io_request->io_complete_callback = pqi_raid_io_complete;
4929 io_request->scmd = scmd;
4930
Kevin Barnett6c223762016-06-27 16:41:00 -05004931 request = io_request->iu;
4932 memset(request, 0,
4933 offsetof(struct pqi_raid_path_request, sg_descriptors));
4934
4935 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
4936 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4937 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4938 put_unaligned_le16(io_request->index, &request->request_id);
4939 request->error_index = request->request_id;
4940 memcpy(request->lun_number, device->scsi3addr,
4941 sizeof(request->lun_number));
4942
4943 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
4944 memcpy(request->cdb, scmd->cmnd, cdb_length);
4945
4946 switch (cdb_length) {
4947 case 6:
4948 case 10:
4949 case 12:
4950 case 16:
4951 /* No bytes in the Additional CDB bytes field */
4952 request->additional_cdb_bytes_usage =
4953 SOP_ADDITIONAL_CDB_BYTES_0;
4954 break;
4955 case 20:
4956		/* 4 bytes in the Additional CDB bytes field */
4957 request->additional_cdb_bytes_usage =
4958 SOP_ADDITIONAL_CDB_BYTES_4;
4959 break;
4960 case 24:
4961		/* 8 bytes in the Additional CDB bytes field */
4962 request->additional_cdb_bytes_usage =
4963 SOP_ADDITIONAL_CDB_BYTES_8;
4964 break;
4965 case 28:
4966		/* 12 bytes in the Additional CDB bytes field */
4967 request->additional_cdb_bytes_usage =
4968 SOP_ADDITIONAL_CDB_BYTES_12;
4969 break;
4970 case 32:
4971 default:
4972		/* 16 bytes in the Additional CDB bytes field */
4973 request->additional_cdb_bytes_usage =
4974 SOP_ADDITIONAL_CDB_BYTES_16;
4975 break;
4976 }
4977
4978 switch (scmd->sc_data_direction) {
4979 case DMA_TO_DEVICE:
4980 request->data_direction = SOP_READ_FLAG;
4981 break;
4982 case DMA_FROM_DEVICE:
4983 request->data_direction = SOP_WRITE_FLAG;
4984 break;
4985 case DMA_NONE:
4986 request->data_direction = SOP_NO_DIRECTION_FLAG;
4987 break;
4988 case DMA_BIDIRECTIONAL:
4989 request->data_direction = SOP_BIDIRECTIONAL;
4990 break;
4991 default:
4992 dev_err(&ctrl_info->pci_dev->dev,
4993 "unknown data direction: %d\n",
4994 scmd->sc_data_direction);
Kevin Barnett6c223762016-06-27 16:41:00 -05004995 break;
4996 }
4997
4998 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
4999 if (rc) {
5000 pqi_free_io_request(io_request);
5001 return SCSI_MLQUEUE_HOST_BUSY;
5002 }
5003
5004 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
5005
5006 return 0;
5007}
5008
Kevin Barnett376fb882017-05-03 18:54:43 -05005009static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5010 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5011 struct pqi_queue_group *queue_group)
5012{
5013 struct pqi_io_request *io_request;
5014
5015 io_request = pqi_alloc_io_request(ctrl_info);
5016
5017 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
5018 device, scmd, queue_group);
5019}
5020
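/*
 * RAID bypass retry support: when an I/O that went down the bypass (AIO)
 * path completes with an error that pqi_raid_bypass_retry_needed() deems
 * retryable, the request is parked on raid_bypass_retry_list and the
 * raid_bypass_retry_work worker resubmits it through the normal RAID path.
 */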
5021static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info)
5022{
5023 if (!pqi_ctrl_blocked(ctrl_info))
5024 schedule_work(&ctrl_info->raid_bypass_retry_work);
5025}
5026
5027static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
5028{
5029 struct scsi_cmnd *scmd;
Kevin Barnett03b288cf2017-05-03 18:54:49 -05005030 struct pqi_scsi_dev *device;
Kevin Barnett376fb882017-05-03 18:54:43 -05005031 struct pqi_ctrl_info *ctrl_info;
5032
5033 if (!io_request->raid_bypass)
5034 return false;
5035
5036 scmd = io_request->scmd;
5037 if ((scmd->result & 0xff) == SAM_STAT_GOOD)
5038 return false;
5039 if (host_byte(scmd->result) == DID_NO_CONNECT)
5040 return false;
5041
Kevin Barnett03b288cf2017-05-03 18:54:49 -05005042 device = scmd->device->hostdata;
5043 if (pqi_device_offline(device))
5044 return false;
5045
Kevin Barnett376fb882017-05-03 18:54:43 -05005046 ctrl_info = shost_to_hba(scmd->device->host);
5047 if (pqi_ctrl_offline(ctrl_info))
5048 return false;
5049
5050 return true;
5051}
5052
5053static inline void pqi_add_to_raid_bypass_retry_list(
5054 struct pqi_ctrl_info *ctrl_info,
5055 struct pqi_io_request *io_request, bool at_head)
5056{
5057 unsigned long flags;
5058
5059 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
5060 if (at_head)
5061 list_add(&io_request->request_list_entry,
5062 &ctrl_info->raid_bypass_retry_list);
5063 else
5064 list_add_tail(&io_request->request_list_entry,
5065 &ctrl_info->raid_bypass_retry_list);
5066 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
5067}
5068
5069static void pqi_queued_raid_bypass_complete(struct pqi_io_request *io_request,
5070 void *context)
5071{
5072 struct scsi_cmnd *scmd;
5073
5074 scmd = io_request->scmd;
5075 pqi_free_io_request(io_request);
5076 pqi_scsi_done(scmd);
5077}
5078
5079static void pqi_queue_raid_bypass_retry(struct pqi_io_request *io_request)
5080{
5081 struct scsi_cmnd *scmd;
5082 struct pqi_ctrl_info *ctrl_info;
5083
5084 io_request->io_complete_callback = pqi_queued_raid_bypass_complete;
5085 scmd = io_request->scmd;
5086 scmd->result = 0;
5087 ctrl_info = shost_to_hba(scmd->device->host);
5088
5089 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, false);
5090 pqi_schedule_bypass_retry(ctrl_info);
5091}
5092
5093static int pqi_retry_raid_bypass(struct pqi_io_request *io_request)
5094{
5095 struct scsi_cmnd *scmd;
5096 struct pqi_scsi_dev *device;
5097 struct pqi_ctrl_info *ctrl_info;
5098 struct pqi_queue_group *queue_group;
5099
5100 scmd = io_request->scmd;
5101 device = scmd->device->hostdata;
5102 if (pqi_device_in_reset(device)) {
5103 pqi_free_io_request(io_request);
5104 set_host_byte(scmd, DID_RESET);
5105 pqi_scsi_done(scmd);
5106 return 0;
5107 }
5108
5109 ctrl_info = shost_to_hba(scmd->device->host);
5110 queue_group = io_request->queue_group;
5111
5112 pqi_reinit_io_request(io_request);
5113
5114 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
5115 device, scmd, queue_group);
5116}
5117
5118static inline struct pqi_io_request *pqi_next_queued_raid_bypass_request(
5119 struct pqi_ctrl_info *ctrl_info)
5120{
5121 unsigned long flags;
5122 struct pqi_io_request *io_request;
5123
5124 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
5125 io_request = list_first_entry_or_null(
5126 &ctrl_info->raid_bypass_retry_list,
5127 struct pqi_io_request, request_list_entry);
5128 if (io_request)
5129 list_del(&io_request->request_list_entry);
5130 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
5131
5132 return io_request;
5133}
5134
5135static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info)
5136{
5137 int rc;
5138 struct pqi_io_request *io_request;
5139
5140 pqi_ctrl_busy(ctrl_info);
5141
5142 while (1) {
5143 if (pqi_ctrl_blocked(ctrl_info))
5144 break;
5145 io_request = pqi_next_queued_raid_bypass_request(ctrl_info);
5146 if (!io_request)
5147 break;
5148 rc = pqi_retry_raid_bypass(io_request);
5149 if (rc) {
5150 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request,
5151 true);
5152 pqi_schedule_bypass_retry(ctrl_info);
5153 break;
5154 }
5155 }
5156
5157 pqi_ctrl_unbusy(ctrl_info);
5158}
5159
5160static void pqi_raid_bypass_retry_worker(struct work_struct *work)
5161{
5162 struct pqi_ctrl_info *ctrl_info;
5163
5164 ctrl_info = container_of(work, struct pqi_ctrl_info,
5165 raid_bypass_retry_work);
5166 pqi_retry_raid_bypass_requests(ctrl_info);
5167}
5168
Kevin Barnett5f310422017-05-03 18:54:55 -05005169static void pqi_clear_all_queued_raid_bypass_retries(
5170 struct pqi_ctrl_info *ctrl_info)
Kevin Barnett376fb882017-05-03 18:54:43 -05005171{
5172 unsigned long flags;
Kevin Barnett376fb882017-05-03 18:54:43 -05005173
5174 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
Kevin Barnett5f310422017-05-03 18:54:55 -05005175 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
Kevin Barnett376fb882017-05-03 18:54:43 -05005176 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
5177}
5178
Kevin Barnett6c223762016-06-27 16:41:00 -05005179static void pqi_aio_io_complete(struct pqi_io_request *io_request,
5180 void *context)
5181{
5182 struct scsi_cmnd *scmd;
5183
5184 scmd = io_request->scmd;
5185 scsi_dma_unmap(scmd);
5186 if (io_request->status == -EAGAIN)
5187 set_host_byte(scmd, DID_IMM_RETRY);
Kevin Barnett376fb882017-05-03 18:54:43 -05005188 else if (pqi_raid_bypass_retry_needed(io_request)) {
5189 pqi_queue_raid_bypass_retry(io_request);
5190 return;
5191 }
Kevin Barnett6c223762016-06-27 16:41:00 -05005192 pqi_free_io_request(io_request);
5193 pqi_scsi_done(scmd);
5194}
5195
5196static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5197 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5198 struct pqi_queue_group *queue_group)
5199{
5200 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
Kevin Barnett376fb882017-05-03 18:54:43 -05005201 scmd->cmnd, scmd->cmd_len, queue_group, NULL, false);
Kevin Barnett6c223762016-06-27 16:41:00 -05005202}
5203
5204static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
5205 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
5206 unsigned int cdb_length, struct pqi_queue_group *queue_group,
Kevin Barnett376fb882017-05-03 18:54:43 -05005207 struct pqi_encryption_info *encryption_info, bool raid_bypass)
Kevin Barnett6c223762016-06-27 16:41:00 -05005208{
5209 int rc;
5210 struct pqi_io_request *io_request;
5211 struct pqi_aio_path_request *request;
5212
5213 io_request = pqi_alloc_io_request(ctrl_info);
5214 io_request->io_complete_callback = pqi_aio_io_complete;
5215 io_request->scmd = scmd;
Kevin Barnett376fb882017-05-03 18:54:43 -05005216 io_request->raid_bypass = raid_bypass;
Kevin Barnett6c223762016-06-27 16:41:00 -05005217
5218 request = io_request->iu;
5219 memset(request, 0,
5220 offsetof(struct pqi_raid_path_request, sg_descriptors));
5221
5222 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
5223 put_unaligned_le32(aio_handle, &request->nexus_id);
5224 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5225 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5226 put_unaligned_le16(io_request->index, &request->request_id);
5227 request->error_index = request->request_id;
5228 if (cdb_length > sizeof(request->cdb))
5229 cdb_length = sizeof(request->cdb);
5230 request->cdb_length = cdb_length;
5231 memcpy(request->cdb, cdb, cdb_length);
5232
5233 switch (scmd->sc_data_direction) {
5234 case DMA_TO_DEVICE:
5235 request->data_direction = SOP_READ_FLAG;
5236 break;
5237 case DMA_FROM_DEVICE:
5238 request->data_direction = SOP_WRITE_FLAG;
5239 break;
5240 case DMA_NONE:
5241 request->data_direction = SOP_NO_DIRECTION_FLAG;
5242 break;
5243 case DMA_BIDIRECTIONAL:
5244 request->data_direction = SOP_BIDIRECTIONAL;
5245 break;
5246 default:
5247 dev_err(&ctrl_info->pci_dev->dev,
5248 "unknown data direction: %d\n",
5249 scmd->sc_data_direction);
Kevin Barnett6c223762016-06-27 16:41:00 -05005250 break;
5251 }
5252
5253 if (encryption_info) {
5254 request->encryption_enable = true;
5255 put_unaligned_le16(encryption_info->data_encryption_key_index,
5256 &request->data_encryption_key_index);
5257 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5258 &request->encrypt_tweak_lower);
5259 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5260 &request->encrypt_tweak_upper);
5261 }
5262
5263 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
5264 if (rc) {
5265 pqi_free_io_request(io_request);
5266 return SCSI_MLQUEUE_HOST_BUSY;
5267 }
5268
5269 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5270
5271 return 0;
5272}
5273
Kevin Barnett061ef062017-05-03 18:53:05 -05005274static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
5275 struct scsi_cmnd *scmd)
5276{
5277 u16 hw_queue;
5278
5279 hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
5280 if (hw_queue > ctrl_info->max_hw_queue_index)
5281 hw_queue = 0;
5282
5283 return hw_queue;
5284}
5285
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005286/*
5287 * This function gets called just before we hand the completed SCSI request
5288 * back to the SML.
5289 */
5290
5291void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
5292{
5293 struct pqi_scsi_dev *device;
5294
Mahesh Rajashekhara1e467312018-12-07 16:29:24 -06005295 if (!scmd->device) {
5296 set_host_byte(scmd, DID_NO_CONNECT);
5297 return;
5298 }
5299
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005300 device = scmd->device->hostdata;
Mahesh Rajashekhara1e467312018-12-07 16:29:24 -06005301 if (!device) {
5302 set_host_byte(scmd, DID_NO_CONNECT);
5303 return;
5304 }
5305
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005306 atomic_dec(&device->scsi_cmds_outstanding);
5307}
5308
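/*
 * queuecommand entry point: logical volumes go down the RAID path, with the
 * RAID bypass (AIO) path tried first when bypass is enabled and the request
 * is not a passthrough; physical devices use the AIO path when aio_enabled
 * is set and otherwise fall back to the RAID path.
 */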
Kevin Barnett6c223762016-06-27 16:41:00 -05005309static int pqi_scsi_queue_command(struct Scsi_Host *shost,
Kevin Barnett7d81d2b2016-08-31 14:55:11 -05005310 struct scsi_cmnd *scmd)
Kevin Barnett6c223762016-06-27 16:41:00 -05005311{
5312 int rc;
5313 struct pqi_ctrl_info *ctrl_info;
5314 struct pqi_scsi_dev *device;
Kevin Barnett061ef062017-05-03 18:53:05 -05005315 u16 hw_queue;
Kevin Barnett6c223762016-06-27 16:41:00 -05005316 struct pqi_queue_group *queue_group;
5317 bool raid_bypassed;
5318
5319 device = scmd->device->hostdata;
Kevin Barnett6c223762016-06-27 16:41:00 -05005320 ctrl_info = shost_to_hba(shost);
5321
Mahesh Rajashekhara1e467312018-12-07 16:29:24 -06005322 if (!device) {
5323 set_host_byte(scmd, DID_NO_CONNECT);
5324 pqi_scsi_done(scmd);
5325 return 0;
5326 }
5327
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005328 atomic_inc(&device->scsi_cmds_outstanding);
5329
Mahesh Rajashekhara1e467312018-12-07 16:29:24 -06005330 if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(ctrl_info,
5331 device)) {
Kevin Barnett6c223762016-06-27 16:41:00 -05005332 set_host_byte(scmd, DID_NO_CONNECT);
5333 pqi_scsi_done(scmd);
5334 return 0;
5335 }
5336
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005337 pqi_ctrl_busy(ctrl_info);
Mahesh Rajashekhara4fd22c12018-12-18 17:39:07 -06005338 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device) ||
5339 pqi_ctrl_in_ofa(ctrl_info)) {
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005340 rc = SCSI_MLQUEUE_HOST_BUSY;
5341 goto out;
5342 }
5343
Kevin Barnett7d81d2b2016-08-31 14:55:11 -05005344 /*
5345 * This is necessary because the SML doesn't zero out this field during
5346 * error recovery.
5347 */
5348 scmd->result = 0;
5349
Kevin Barnett061ef062017-05-03 18:53:05 -05005350 hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
5351 queue_group = &ctrl_info->queue_groups[hw_queue];
Kevin Barnett6c223762016-06-27 16:41:00 -05005352
5353 if (pqi_is_logical_device(device)) {
5354 raid_bypassed = false;
Kevin Barnett588a63fe2017-05-03 18:55:25 -05005355 if (device->raid_bypass_enabled &&
Christoph Hellwig57292b52017-01-31 16:57:29 +01005356 !blk_rq_is_passthrough(scmd->request)) {
Kevin Barnett6c223762016-06-27 16:41:00 -05005357 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
5358 scmd, queue_group);
Kevin Barnett376fb882017-05-03 18:54:43 -05005359 if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY)
5360 raid_bypassed = true;
Kevin Barnett6c223762016-06-27 16:41:00 -05005361 }
5362 if (!raid_bypassed)
5363 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
5364 queue_group);
5365 } else {
5366 if (device->aio_enabled)
5367 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
5368 queue_group);
5369 else
5370 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
5371 queue_group);
5372 }
5373
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005374out:
5375 pqi_ctrl_unbusy(ctrl_info);
5376 if (rc)
5377 atomic_dec(&device->scsi_cmds_outstanding);
5378
Kevin Barnett6c223762016-06-27 16:41:00 -05005379 return rc;
5380}
5381
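/*
 * Quiesce helper: poll (sleeping 1-2 ms per pass) until both submit lists of
 * the queue group are empty, bailing out with -ENXIO if the controller goes
 * offline in the meantime.
 */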
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005382static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info,
5383 struct pqi_queue_group *queue_group)
5384{
5385 unsigned int path;
5386 unsigned long flags;
5387 bool list_is_empty;
5388
5389 for (path = 0; path < 2; path++) {
5390 while (1) {
5391 spin_lock_irqsave(
5392 &queue_group->submit_lock[path], flags);
5393 list_is_empty =
5394 list_empty(&queue_group->request_list[path]);
5395 spin_unlock_irqrestore(
5396 &queue_group->submit_lock[path], flags);
5397 if (list_is_empty)
5398 break;
5399 pqi_check_ctrl_health(ctrl_info);
5400 if (pqi_ctrl_offline(ctrl_info))
5401 return -ENXIO;
5402 usleep_range(1000, 2000);
5403 }
5404 }
5405
5406 return 0;
5407}
5408
5409static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
5410{
5411 int rc;
5412 unsigned int i;
5413 unsigned int path;
5414 struct pqi_queue_group *queue_group;
5415 pqi_index_t iq_pi;
5416 pqi_index_t iq_ci;
5417
5418 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5419 queue_group = &ctrl_info->queue_groups[i];
5420
5421 rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group);
5422 if (rc)
5423 return rc;
5424
5425 for (path = 0; path < 2; path++) {
5426 iq_pi = queue_group->iq_pi_copy[path];
5427
5428 while (1) {
Kevin Barnettdac12fb2018-06-18 13:23:00 -05005429 iq_ci = readl(queue_group->iq_ci[path]);
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005430 if (iq_ci == iq_pi)
5431 break;
5432 pqi_check_ctrl_health(ctrl_info);
5433 if (pqi_ctrl_offline(ctrl_info))
5434 return -ENXIO;
5435 usleep_range(1000, 2000);
5436 }
5437 }
5438 }
5439
5440 return 0;
5441}
5442
5443static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
5444 struct pqi_scsi_dev *device)
5445{
5446 unsigned int i;
5447 unsigned int path;
5448 struct pqi_queue_group *queue_group;
5449 unsigned long flags;
5450 struct pqi_io_request *io_request;
5451 struct pqi_io_request *next;
5452 struct scsi_cmnd *scmd;
5453 struct pqi_scsi_dev *scsi_device;
5454
5455 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5456 queue_group = &ctrl_info->queue_groups[i];
5457
5458 for (path = 0; path < 2; path++) {
5459 spin_lock_irqsave(
5460 &queue_group->submit_lock[path], flags);
5461
5462 list_for_each_entry_safe(io_request, next,
5463 &queue_group->request_list[path],
5464 request_list_entry) {
5465 scmd = io_request->scmd;
5466 if (!scmd)
5467 continue;
5468
5469 scsi_device = scmd->device->hostdata;
5470 if (scsi_device != device)
5471 continue;
5472
5473 list_del(&io_request->request_list_entry);
5474 set_host_byte(scmd, DID_RESET);
5475 pqi_scsi_done(scmd);
5476 }
5477
5478 spin_unlock_irqrestore(
5479 &queue_group->submit_lock[path], flags);
5480 }
5481 }
5482}
5483
Mahesh Rajashekhara4fd22c12018-12-18 17:39:07 -06005484static void pqi_fail_io_queued_for_all_devices(struct pqi_ctrl_info *ctrl_info)
5485{
5486 unsigned int i;
5487 unsigned int path;
5488 struct pqi_queue_group *queue_group;
5489 unsigned long flags;
5490 struct pqi_io_request *io_request;
5491 struct pqi_io_request *next;
5492 struct scsi_cmnd *scmd;
5493
5494 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5495 queue_group = &ctrl_info->queue_groups[i];
5496
5497 for (path = 0; path < 2; path++) {
5498 spin_lock_irqsave(&queue_group->submit_lock[path],
5499 flags);
5500
5501 list_for_each_entry_safe(io_request, next,
5502 &queue_group->request_list[path],
5503 request_list_entry) {
5504
5505 scmd = io_request->scmd;
5506 if (!scmd)
5507 continue;
5508
5509 list_del(&io_request->request_list_entry);
5510 set_host_byte(scmd, DID_RESET);
5511 pqi_scsi_done(scmd);
5512 }
5513
5514 spin_unlock_irqrestore(
5515 &queue_group->submit_lock[path], flags);
5516 }
5517 }
5518}
5519
Kevin Barnett061ef062017-05-03 18:53:05 -05005520static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
Mahesh Rajashekhara1e467312018-12-07 16:29:24 -06005521 struct pqi_scsi_dev *device, unsigned long timeout_secs)
Kevin Barnett061ef062017-05-03 18:53:05 -05005522{
Mahesh Rajashekhara1e467312018-12-07 16:29:24 -06005523 unsigned long timeout;
5524
Mahesh Rajashekhara4fd22c12018-12-18 17:39:07 -06005525 timeout = (timeout_secs * PQI_HZ) + jiffies;
Mahesh Rajashekhara1e467312018-12-07 16:29:24 -06005526
Kevin Barnett061ef062017-05-03 18:53:05 -05005527 while (atomic_read(&device->scsi_cmds_outstanding)) {
5528 pqi_check_ctrl_health(ctrl_info);
5529 if (pqi_ctrl_offline(ctrl_info))
5530 return -ENXIO;
Mahesh Rajashekhara1e467312018-12-07 16:29:24 -06005531 if (timeout_secs != NO_TIMEOUT) {
5532 if (time_after(jiffies, timeout)) {
5533 dev_err(&ctrl_info->pci_dev->dev,
5534 "timed out waiting for pending IO\n");
5535 return -ETIMEDOUT;
5536 }
5537 }
Kevin Barnett061ef062017-05-03 18:53:05 -05005538 usleep_range(1000, 2000);
5539 }
5540
5541 return 0;
5542}
5543
Mahesh Rajashekhara4fd22c12018-12-18 17:39:07 -06005544static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
5545 unsigned long timeout_secs)
Kevin Barnett061ef062017-05-03 18:53:05 -05005546{
5547 bool io_pending;
5548 unsigned long flags;
Mahesh Rajashekhara4fd22c12018-12-18 17:39:07 -06005549 unsigned long timeout;
Kevin Barnett061ef062017-05-03 18:53:05 -05005550 struct pqi_scsi_dev *device;
5551
Mahesh Rajashekhara4fd22c12018-12-18 17:39:07 -06005552 timeout = (timeout_secs * PQI_HZ) + jiffies;
Kevin Barnett061ef062017-05-03 18:53:05 -05005553 while (1) {
5554 io_pending = false;
5555
5556 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5557 list_for_each_entry(device, &ctrl_info->scsi_device_list,
5558 scsi_device_list_entry) {
5559 if (atomic_read(&device->scsi_cmds_outstanding)) {
5560 io_pending = true;
5561 break;
5562 }
5563 }
5564 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5565 flags);
5566
5567 if (!io_pending)
5568 break;
5569
5570 pqi_check_ctrl_health(ctrl_info);
5571 if (pqi_ctrl_offline(ctrl_info))
5572 return -ENXIO;
5573
Mahesh Rajashekhara4fd22c12018-12-18 17:39:07 -06005574 if (timeout_secs != NO_TIMEOUT) {
5575 if (time_after(jiffies, timeout)) {
5576 dev_err(&ctrl_info->pci_dev->dev,
5577 "timed out waiting for pending IO\n");
5578 return -ETIMEDOUT;
5579 }
5580 }
Kevin Barnett061ef062017-05-03 18:53:05 -05005581 usleep_range(1000, 2000);
5582 }
5583
5584 return 0;
5585}
5586
Kevin Barnett14bb2152016-08-31 14:54:35 -05005587static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
Kevin Barnett6c223762016-06-27 16:41:00 -05005588 void *context)
5589{
5590 struct completion *waiting = context;
5591
5592 complete(waiting);
5593}
5594
Kevin Barnett14bb2152016-08-31 14:54:35 -05005595#define PQI_LUN_RESET_TIMEOUT_SECS 10
5596
5597static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
5598 struct pqi_scsi_dev *device, struct completion *wait)
5599{
5600 int rc;
Kevin Barnett14bb2152016-08-31 14:54:35 -05005601
5602 while (1) {
5603 if (wait_for_completion_io_timeout(wait,
Mahesh Rajashekhara4fd22c12018-12-18 17:39:07 -06005604 PQI_LUN_RESET_TIMEOUT_SECS * PQI_HZ)) {
Kevin Barnett14bb2152016-08-31 14:54:35 -05005605 rc = 0;
5606 break;
5607 }
5608
5609 pqi_check_ctrl_health(ctrl_info);
5610 if (pqi_ctrl_offline(ctrl_info)) {
Kevin Barnett4e8415e2017-05-03 18:54:18 -05005611 rc = -ENXIO;
Kevin Barnett14bb2152016-08-31 14:54:35 -05005612 break;
5613 }
Kevin Barnett14bb2152016-08-31 14:54:35 -05005614 }
5615
5616 return rc;
5617}
5618
5619static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
Kevin Barnett6c223762016-06-27 16:41:00 -05005620 struct pqi_scsi_dev *device)
5621{
5622 int rc;
5623 struct pqi_io_request *io_request;
5624 DECLARE_COMPLETION_ONSTACK(wait);
5625 struct pqi_task_management_request *request;
5626
Kevin Barnett6c223762016-06-27 16:41:00 -05005627 io_request = pqi_alloc_io_request(ctrl_info);
Kevin Barnett14bb2152016-08-31 14:54:35 -05005628 io_request->io_complete_callback = pqi_lun_reset_complete;
Kevin Barnett6c223762016-06-27 16:41:00 -05005629 io_request->context = &wait;
5630
5631 request = io_request->iu;
5632 memset(request, 0, sizeof(*request));
5633
5634 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
5635 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
5636 &request->header.iu_length);
5637 put_unaligned_le16(io_request->index, &request->request_id);
5638 memcpy(request->lun_number, device->scsi3addr,
5639 sizeof(request->lun_number));
5640 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
5641
5642 pqi_start_io(ctrl_info,
5643 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
5644 io_request);
5645
Kevin Barnett14bb2152016-08-31 14:54:35 -05005646 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
5647 if (rc == 0)
Kevin Barnett6c223762016-06-27 16:41:00 -05005648 rc = io_request->status;
Kevin Barnett6c223762016-06-27 16:41:00 -05005649
5650 pqi_free_io_request(io_request);
Kevin Barnett6c223762016-06-27 16:41:00 -05005651
5652 return rc;
5653}
5654
Mahesh Rajashekhara34063842018-12-07 16:28:16 -06005655#define PQI_LUN_RESET_RETRIES 3
5656#define PQI_LUN_RESET_RETRY_INTERVAL_MSECS 10000
Kevin Barnett6c223762016-06-27 16:41:00 -05005657/* Performs a reset at the LUN level. */
5658
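/*
 * The LUN reset is retried every PQI_LUN_RESET_RETRY_INTERVAL_MSECS for as
 * long as the firmware returns -EAGAIN, up to PQI_LUN_RESET_RETRIES extra
 * attempts; we then wait for the device's outstanding I/O to drain, capped
 * at PQI_LUN_RESET_TIMEOUT_SECS if the reset itself failed.
 */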
Mahesh Rajashekhara4fd22c12018-12-18 17:39:07 -06005659static int _pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
Kevin Barnett6c223762016-06-27 16:41:00 -05005660 struct pqi_scsi_dev *device)
5661{
5662 int rc;
Mahesh Rajashekhara34063842018-12-07 16:28:16 -06005663 unsigned int retries;
Mahesh Rajashekhara4fd22c12018-12-18 17:39:07 -06005664 unsigned long timeout_secs;
Kevin Barnett6c223762016-06-27 16:41:00 -05005665
Mahesh Rajashekhara34063842018-12-07 16:28:16 -06005666 for (retries = 0;;) {
5667 rc = pqi_lun_reset(ctrl_info, device);
5668 if (rc != -EAGAIN ||
5669 ++retries > PQI_LUN_RESET_RETRIES)
5670 break;
5671 msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
5672 }
Mahesh Rajashekhara4fd22c12018-12-18 17:39:07 -06005673 timeout_secs = rc ? PQI_LUN_RESET_TIMEOUT_SECS : NO_TIMEOUT;
5674
5675 rc |= pqi_device_wait_for_pending_io(ctrl_info, device, timeout_secs);
Kevin Barnett6c223762016-06-27 16:41:00 -05005676
Kevin Barnett14bb2152016-08-31 14:54:35 -05005677 return rc == 0 ? SUCCESS : FAILED;
Kevin Barnett6c223762016-06-27 16:41:00 -05005678}
5679
Mahesh Rajashekhara4fd22c12018-12-18 17:39:07 -06005680static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
5681 struct pqi_scsi_dev *device)
5682{
5683 int rc;
5684
5685 mutex_lock(&ctrl_info->lun_reset_mutex);
5686
5687 pqi_ctrl_block_requests(ctrl_info);
5688 pqi_ctrl_wait_until_quiesced(ctrl_info);
5689 pqi_fail_io_queued_for_device(ctrl_info, device);
5690 rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
5691 pqi_device_reset_start(device);
5692 pqi_ctrl_unblock_requests(ctrl_info);
5693
5694 if (rc)
5695 rc = FAILED;
5696 else
5697 rc = _pqi_device_reset(ctrl_info, device);
5698
5699 pqi_device_reset_done(device);
5700
5701 mutex_unlock(&ctrl_info->lun_reset_mutex);
5702 return rc;
5703}
5704
Kevin Barnett6c223762016-06-27 16:41:00 -05005705static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
5706{
5707 int rc;
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005708 struct Scsi_Host *shost;
Kevin Barnett6c223762016-06-27 16:41:00 -05005709 struct pqi_ctrl_info *ctrl_info;
5710 struct pqi_scsi_dev *device;
5711
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005712 shost = scmd->device->host;
5713 ctrl_info = shost_to_hba(shost);
Kevin Barnett6c223762016-06-27 16:41:00 -05005714 device = scmd->device->hostdata;
5715
5716 dev_err(&ctrl_info->pci_dev->dev,
5717 "resetting scsi %d:%d:%d:%d\n",
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005718 shost->host_no, device->bus, device->target, device->lun);
Kevin Barnett6c223762016-06-27 16:41:00 -05005719
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005720 pqi_check_ctrl_health(ctrl_info);
5721 if (pqi_ctrl_offline(ctrl_info)) {
Mahesh Rajashekhara4fd22c12018-12-18 17:39:07 -06005722 dev_err(&ctrl_info->pci_dev->dev,
5723 "controller %u offlined - cannot send device reset\n",
5724 ctrl_info->ctrl_id);
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005725 rc = FAILED;
5726 goto out;
5727 }
Kevin Barnett6c223762016-06-27 16:41:00 -05005728
Mahesh Rajashekhara4fd22c12018-12-18 17:39:07 -06005729 pqi_wait_until_ofa_finished(ctrl_info);
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005730
Mahesh Rajashekhara4fd22c12018-12-18 17:39:07 -06005731 rc = pqi_device_reset(ctrl_info, device);
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005732out:
Kevin Barnett6c223762016-06-27 16:41:00 -05005733 dev_err(&ctrl_info->pci_dev->dev,
5734 "reset of scsi %d:%d:%d:%d: %s\n",
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005735 shost->host_no, device->bus, device->target, device->lun,
Kevin Barnett6c223762016-06-27 16:41:00 -05005736 rc == SUCCESS ? "SUCCESS" : "FAILED");
5737
5738 return rc;
5739}
5740
5741static int pqi_slave_alloc(struct scsi_device *sdev)
5742{
5743 struct pqi_scsi_dev *device;
5744 unsigned long flags;
5745 struct pqi_ctrl_info *ctrl_info;
5746 struct scsi_target *starget;
5747 struct sas_rphy *rphy;
5748
5749 ctrl_info = shost_to_hba(sdev->host);
5750
5751 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5752
5753 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
5754 starget = scsi_target(sdev);
5755 rphy = target_to_rphy(starget);
5756 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
5757 if (device) {
5758 device->target = sdev_id(sdev);
5759 device->lun = sdev->lun;
5760 device->target_lun_valid = true;
5761 }
5762 } else {
5763 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
5764 sdev_id(sdev), sdev->lun);
5765 }
5766
Kevin Barnett94086f52017-05-03 18:54:31 -05005767 if (device) {
Kevin Barnett6c223762016-06-27 16:41:00 -05005768 sdev->hostdata = device;
5769 device->sdev = sdev;
5770 if (device->queue_depth) {
5771 device->advertised_queue_depth = device->queue_depth;
5772 scsi_change_queue_depth(sdev,
5773 device->advertised_queue_depth);
5774 }
Dave Carrollb6e2ef62018-12-07 16:28:23 -06005775 if (pqi_is_logical_device(device))
5776 pqi_disable_write_same(sdev);
Dave Carroll2b447f82018-12-07 16:29:05 -06005777 else
5778 sdev->allow_restart = 1;
Kevin Barnett6c223762016-06-27 16:41:00 -05005779 }
5780
5781 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5782
5783 return 0;
5784}
5785
Christoph Hellwig52198222016-11-01 08:12:49 -06005786static int pqi_map_queues(struct Scsi_Host *shost)
5787{
5788 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
5789
Keith Buschf23f5bec2018-03-27 09:39:06 -06005790 return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev, 0);
Christoph Hellwig52198222016-11-01 08:12:49 -06005791}
5792
Kevin Barnett6c223762016-06-27 16:41:00 -05005793static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
5794 void __user *arg)
5795{
5796 struct pci_dev *pci_dev;
5797 u32 subsystem_vendor;
5798 u32 subsystem_device;
5799 cciss_pci_info_struct pciinfo;
5800
5801 if (!arg)
5802 return -EINVAL;
5803
5804 pci_dev = ctrl_info->pci_dev;
5805
5806 pciinfo.domain = pci_domain_nr(pci_dev->bus);
5807 pciinfo.bus = pci_dev->bus->number;
5808 pciinfo.dev_fn = pci_dev->devfn;
5809 subsystem_vendor = pci_dev->subsystem_vendor;
5810 subsystem_device = pci_dev->subsystem_device;
5811 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
5812 subsystem_vendor;
5813
5814 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
5815 return -EFAULT;
5816
5817 return 0;
5818}
5819
5820static int pqi_getdrivver_ioctl(void __user *arg)
5821{
5822 u32 version;
5823
5824 if (!arg)
5825 return -EINVAL;
5826
5827 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
5828 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
5829
5830 if (copy_to_user(arg, &version, sizeof(version)))
5831 return -EFAULT;
5832
5833 return 0;
5834}
5835
5836struct ciss_error_info {
5837 u8 scsi_status;
5838 int command_status;
5839 size_t sense_data_length;
5840};
5841
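/*
 * Translate a PQI RAID path error descriptor into the legacy CISS status
 * returned through the CCISS ioctls: map data_out_result to a
 * CISS_CMD_STATUS_* value and clamp the sense/response data length to the
 * buffer actually carried in the error info.
 */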
5842static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
5843 struct ciss_error_info *ciss_error_info)
5844{
5845 int ciss_cmd_status;
5846 size_t sense_data_length;
5847
5848 switch (pqi_error_info->data_out_result) {
5849 case PQI_DATA_IN_OUT_GOOD:
5850 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
5851 break;
5852 case PQI_DATA_IN_OUT_UNDERFLOW:
5853 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
5854 break;
5855 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
5856 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
5857 break;
5858 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
5859 case PQI_DATA_IN_OUT_BUFFER_ERROR:
5860 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
5861 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
5862 case PQI_DATA_IN_OUT_ERROR:
5863 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
5864 break;
5865 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
5866 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
5867 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
5868 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
5869 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
5870 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
5871 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
5872 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
5873 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
5874 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
5875 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
5876 break;
5877 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
5878 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
5879 break;
5880 case PQI_DATA_IN_OUT_ABORTED:
5881 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
5882 break;
5883 case PQI_DATA_IN_OUT_TIMEOUT:
5884 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
5885 break;
5886 default:
5887 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
5888 break;
5889 }
5890
5891 sense_data_length =
5892 get_unaligned_le16(&pqi_error_info->sense_data_length);
5893 if (sense_data_length == 0)
5894 sense_data_length =
5895 get_unaligned_le16(&pqi_error_info->response_data_length);
5896 if (sense_data_length)
5897 if (sense_data_length > sizeof(pqi_error_info->data))
5898 sense_data_length = sizeof(pqi_error_info->data);
5899
5900 ciss_error_info->scsi_status = pqi_error_info->status;
5901 ciss_error_info->command_status = ciss_cmd_status;
5902 ciss_error_info->sense_data_length = sense_data_length;
5903}
5904
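/*
 * CCISS_PASSTHRU handler: validate the user's IOCTL_Command_struct, bounce
 * the data buffer through a kernel allocation mapped for DMA, issue the CDB
 * as a synchronous RAID path request, then copy any sense data and (for
 * reads) the data buffer back to user space.
 */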
5905static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
5906{
5907 int rc;
5908 char *kernel_buffer = NULL;
5909 u16 iu_length;
5910 size_t sense_data_length;
5911 IOCTL_Command_struct iocommand;
5912 struct pqi_raid_path_request request;
5913 struct pqi_raid_error_info pqi_error_info;
5914 struct ciss_error_info ciss_error_info;
5915
5916 if (pqi_ctrl_offline(ctrl_info))
5917 return -ENXIO;
5918 if (!arg)
5919 return -EINVAL;
5920 if (!capable(CAP_SYS_RAWIO))
5921 return -EPERM;
5922 if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
5923 return -EFAULT;
5924 if (iocommand.buf_size < 1 &&
5925 iocommand.Request.Type.Direction != XFER_NONE)
5926 return -EINVAL;
5927 if (iocommand.Request.CDBLen > sizeof(request.cdb))
5928 return -EINVAL;
5929 if (iocommand.Request.Type.Type != TYPE_CMD)
5930 return -EINVAL;
5931
5932 switch (iocommand.Request.Type.Direction) {
5933 case XFER_NONE:
5934 case XFER_WRITE:
5935 case XFER_READ:
Kevin Barnett41555d52017-08-10 13:46:51 -05005936 case XFER_READ | XFER_WRITE:
Kevin Barnett6c223762016-06-27 16:41:00 -05005937 break;
5938 default:
5939 return -EINVAL;
5940 }
5941
5942 if (iocommand.buf_size > 0) {
5943 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
5944 if (!kernel_buffer)
5945 return -ENOMEM;
5946 if (iocommand.Request.Type.Direction & XFER_WRITE) {
5947 if (copy_from_user(kernel_buffer, iocommand.buf,
5948 iocommand.buf_size)) {
5949 rc = -EFAULT;
5950 goto out;
5951 }
5952 } else {
5953 memset(kernel_buffer, 0, iocommand.buf_size);
5954 }
5955 }
5956
5957 memset(&request, 0, sizeof(request));
5958
5959 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
5960 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
5961 PQI_REQUEST_HEADER_LENGTH;
5962 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
5963 sizeof(request.lun_number));
5964 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
5965 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
5966
5967 switch (iocommand.Request.Type.Direction) {
5968 case XFER_NONE:
5969 request.data_direction = SOP_NO_DIRECTION_FLAG;
5970 break;
5971 case XFER_WRITE:
5972 request.data_direction = SOP_WRITE_FLAG;
5973 break;
5974 case XFER_READ:
5975 request.data_direction = SOP_READ_FLAG;
5976 break;
Kevin Barnett41555d52017-08-10 13:46:51 -05005977 case XFER_READ | XFER_WRITE:
5978 request.data_direction = SOP_BIDIRECTIONAL;
5979 break;
Kevin Barnett6c223762016-06-27 16:41:00 -05005980 }
5981
5982 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5983
5984 if (iocommand.buf_size > 0) {
5985 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
5986
5987 rc = pqi_map_single(ctrl_info->pci_dev,
5988 &request.sg_descriptors[0], kernel_buffer,
Christoph Hellwig6917a9c2018-10-11 09:47:59 +02005989 iocommand.buf_size, DMA_BIDIRECTIONAL);
Kevin Barnett6c223762016-06-27 16:41:00 -05005990 if (rc)
5991 goto out;
5992
5993 iu_length += sizeof(request.sg_descriptors[0]);
5994 }
5995
5996 put_unaligned_le16(iu_length, &request.header.iu_length);
5997
5998 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
5999 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
6000
6001 if (iocommand.buf_size > 0)
6002 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
Christoph Hellwig6917a9c2018-10-11 09:47:59 +02006003 DMA_BIDIRECTIONAL);
Kevin Barnett6c223762016-06-27 16:41:00 -05006004
6005 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
6006
6007 if (rc == 0) {
6008 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
6009 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
6010 iocommand.error_info.CommandStatus =
6011 ciss_error_info.command_status;
6012 sense_data_length = ciss_error_info.sense_data_length;
6013 if (sense_data_length) {
6014 if (sense_data_length >
6015 sizeof(iocommand.error_info.SenseInfo))
6016 sense_data_length =
6017 sizeof(iocommand.error_info.SenseInfo);
6018 memcpy(iocommand.error_info.SenseInfo,
6019 pqi_error_info.data, sense_data_length);
6020 iocommand.error_info.SenseLen = sense_data_length;
6021 }
6022 }
6023
6024 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
6025 rc = -EFAULT;
6026 goto out;
6027 }
6028
6029 if (rc == 0 && iocommand.buf_size > 0 &&
6030 (iocommand.Request.Type.Direction & XFER_READ)) {
6031 if (copy_to_user(iocommand.buf, kernel_buffer,
6032 iocommand.buf_size)) {
6033 rc = -EFAULT;
6034 }
6035 }
6036
6037out:
6038 kfree(kernel_buffer);
6039
6040 return rc;
6041}
6042
6043static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6044{
6045 int rc;
6046 struct pqi_ctrl_info *ctrl_info;
6047
6048 ctrl_info = shost_to_hba(sdev->host);
6049
Mahesh Rajashekhara4fd22c12018-12-18 17:39:07 -06006050 if (pqi_ctrl_in_ofa(ctrl_info))
6051 return -EBUSY;
6052
Kevin Barnett6c223762016-06-27 16:41:00 -05006053 switch (cmd) {
6054 case CCISS_DEREGDISK:
6055 case CCISS_REGNEWDISK:
6056 case CCISS_REGNEWD:
6057 rc = pqi_scan_scsi_devices(ctrl_info);
6058 break;
6059 case CCISS_GETPCIINFO:
6060 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
6061 break;
6062 case CCISS_GETDRIVVER:
6063 rc = pqi_getdrivver_ioctl(arg);
6064 break;
6065 case CCISS_PASSTHRU:
6066 rc = pqi_passthru_ioctl(ctrl_info, arg);
6067 break;
6068 default:
6069 rc = -EINVAL;
6070 break;
6071 }
6072
6073 return rc;
6074}
6075
6076static ssize_t pqi_version_show(struct device *dev,
6077 struct device_attribute *attr, char *buffer)
6078{
6079 ssize_t count = 0;
6080 struct Scsi_Host *shost;
6081 struct pqi_ctrl_info *ctrl_info;
6082
6083 shost = class_to_shost(dev);
6084 ctrl_info = shost_to_hba(shost);
6085
6086 count += snprintf(buffer + count, PAGE_SIZE - count,
6087 " driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP);
6088
6089 count += snprintf(buffer + count, PAGE_SIZE - count,
6090 "firmware: %s\n", ctrl_info->firmware_version);
6091
6092 return count;
6093}
6094
6095static ssize_t pqi_host_rescan_store(struct device *dev,
6096 struct device_attribute *attr, const char *buffer, size_t count)
6097{
6098 struct Scsi_Host *shost = class_to_shost(dev);
6099
6100 pqi_scan_start(shost);
6101
6102 return count;
6103}
6104
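/*
 * The lockup_action host attribute lists the supported actions with the
 * currently selected one in brackets; writing one of the action names
 * selects that action for future controller lockups.
 */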
Kevin Barnett3c509762017-05-03 18:54:37 -05006105static ssize_t pqi_lockup_action_show(struct device *dev,
6106 struct device_attribute *attr, char *buffer)
6107{
6108 int count = 0;
6109 unsigned int i;
6110
6111 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6112 if (pqi_lockup_actions[i].action == pqi_lockup_action)
6113 count += snprintf(buffer + count, PAGE_SIZE - count,
6114 "[%s] ", pqi_lockup_actions[i].name);
6115 else
6116 count += snprintf(buffer + count, PAGE_SIZE - count,
6117 "%s ", pqi_lockup_actions[i].name);
6118 }
6119
6120 count += snprintf(buffer + count, PAGE_SIZE - count, "\n");
6121
6122 return count;
6123}
6124
6125static ssize_t pqi_lockup_action_store(struct device *dev,
6126 struct device_attribute *attr, const char *buffer, size_t count)
6127{
6128 unsigned int i;
6129 char *action_name;
6130 char action_name_buffer[32];
6131
6132 strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer));
6133 action_name = strstrip(action_name_buffer);
6134
6135 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6136 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) {
6137 pqi_lockup_action = pqi_lockup_actions[i].action;
6138 return count;
6139 }
6140 }
6141
6142 return -EINVAL;
6143}
6144
Kevin Barnettcbe0c7b2017-05-03 18:53:48 -05006145static DEVICE_ATTR(version, 0444, pqi_version_show, NULL);
6146static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
Kevin Barnett3c509762017-05-03 18:54:37 -05006147static DEVICE_ATTR(lockup_action, 0644,
6148 pqi_lockup_action_show, pqi_lockup_action_store);
Kevin Barnett6c223762016-06-27 16:41:00 -05006149
6150static struct device_attribute *pqi_shost_attrs[] = {
6151 &dev_attr_version,
6152 &dev_attr_rescan,
Kevin Barnett3c509762017-05-03 18:54:37 -05006153 &dev_attr_lockup_action,
Kevin Barnett6c223762016-06-27 16:41:00 -05006154 NULL
6155};
6156
Dave Carrollcd128242018-12-07 16:28:47 -06006157static ssize_t pqi_unique_id_show(struct device *dev,
6158 struct device_attribute *attr, char *buffer)
6159{
6160 struct pqi_ctrl_info *ctrl_info;
6161 struct scsi_device *sdev;
6162 struct pqi_scsi_dev *device;
6163 unsigned long flags;
6164 unsigned char uid[16];
6165
6166 sdev = to_scsi_device(dev);
6167 ctrl_info = shost_to_hba(sdev->host);
6168
6169 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6170
6171 device = sdev->hostdata;
6172 if (!device) {
6173 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
6174 flags);
6175 return -ENODEV;
6176 }
6177 memcpy(uid, device->unique_id, sizeof(uid));
6178
6179 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6180
Murthy Bhat5995b232018-12-07 16:28:59 -06006181 return snprintf(buffer, PAGE_SIZE,
6182 "%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n",
6183 uid[0], uid[1], uid[2], uid[3],
6184 uid[4], uid[5], uid[6], uid[7],
6185 uid[8], uid[9], uid[10], uid[11],
6186 uid[12], uid[13], uid[14], uid[15]);
Dave Carrollcd128242018-12-07 16:28:47 -06006187}
6188
6189static ssize_t pqi_lunid_show(struct device *dev,
6190 struct device_attribute *attr, char *buffer)
6191{
6192 struct pqi_ctrl_info *ctrl_info;
6193 struct scsi_device *sdev;
6194 struct pqi_scsi_dev *device;
6195 unsigned long flags;
6196 u8 lunid[8];
6197
6198 sdev = to_scsi_device(dev);
6199 ctrl_info = shost_to_hba(sdev->host);
6200
6201 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6202
6203 device = sdev->hostdata;
6204 if (!device) {
6205 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
6206 flags);
6207 return -ENODEV;
6208 }
6209 memcpy(lunid, device->scsi3addr, sizeof(lunid));
6210
6211 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6212
6213 return snprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
6214}
6215
6216#define MAX_PATHS 8
6217static ssize_t pqi_path_info_show(struct device *dev,
6218 struct device_attribute *attr, char *buf)
6219{
6220 struct pqi_ctrl_info *ctrl_info;
6221 struct scsi_device *sdev;
6222 struct pqi_scsi_dev *device;
6223 unsigned long flags;
6224 int i;
6225 int output_len = 0;
6226 u8 box;
6227 u8 bay;
6228 u8 path_map_index = 0;
6229 char *active;
6230 unsigned char phys_connector[2];
6231
6232 sdev = to_scsi_device(dev);
6233 ctrl_info = shost_to_hba(sdev->host);
6234
6235 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6236
6237 device = sdev->hostdata;
6238 if (!device) {
6239 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
6240 flags);
6241 return -ENODEV;
6242 }
6243
6244 bay = device->bay;
6245 for (i = 0; i < MAX_PATHS; i++) {
6246 path_map_index = 1<<i;
6247 if (i == device->active_path_index)
6248 active = "Active";
6249 else if (device->path_map & path_map_index)
6250 active = "Inactive";
6251 else
6252 continue;
6253
6254 output_len += scnprintf(buf + output_len,
6255 PAGE_SIZE - output_len,
6256 "[%d:%d:%d:%d] %20.20s ",
6257 ctrl_info->scsi_host->host_no,
6258 device->bus, device->target,
6259 device->lun,
6260 scsi_device_type(device->devtype));
6261
6262 if (device->devtype == TYPE_RAID ||
6263 pqi_is_logical_device(device))
6264 goto end_buffer;
6265
6266 memcpy(&phys_connector, &device->phys_connector[i],
6267 sizeof(phys_connector));
6268 if (phys_connector[0] < '0')
6269 phys_connector[0] = '0';
6270 if (phys_connector[1] < '0')
6271 phys_connector[1] = '0';
6272
6273 output_len += scnprintf(buf + output_len,
6274 PAGE_SIZE - output_len,
6275 "PORT: %.2s ", phys_connector);
6276
6277 box = device->box[i];
6278 if (box != 0 && box != 0xFF)
6279 output_len += scnprintf(buf + output_len,
6280 PAGE_SIZE - output_len,
6281 "BOX: %hhu ", box);
6282
6283 if ((device->devtype == TYPE_DISK ||
6284 device->devtype == TYPE_ZBC) &&
6285 pqi_expose_device(device))
6286 output_len += scnprintf(buf + output_len,
6287 PAGE_SIZE - output_len,
6288 "BAY: %hhu ", bay);
6289
6290end_buffer:
6291 output_len += scnprintf(buf + output_len,
6292 PAGE_SIZE - output_len,
6293 "%s\n", active);
6294 }
6295
6296 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6297 return output_len;
6298}
6299
6300
Kevin Barnett6c223762016-06-27 16:41:00 -05006301static ssize_t pqi_sas_address_show(struct device *dev,
6302 struct device_attribute *attr, char *buffer)
6303{
6304 struct pqi_ctrl_info *ctrl_info;
6305 struct scsi_device *sdev;
6306 struct pqi_scsi_dev *device;
6307 unsigned long flags;
6308 u64 sas_address;
6309
6310 sdev = to_scsi_device(dev);
6311 ctrl_info = shost_to_hba(sdev->host);
6312
6313 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6314
6315 device = sdev->hostdata;
6316 if (pqi_is_logical_device(device)) {
6317 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
6318 flags);
6319 return -ENODEV;
6320 }
6321 sas_address = device->sas_address;
6322
6323 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6324
6325 return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
6326}
6327
6328static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
6329 struct device_attribute *attr, char *buffer)
6330{
6331 struct pqi_ctrl_info *ctrl_info;
6332 struct scsi_device *sdev;
6333 struct pqi_scsi_dev *device;
6334 unsigned long flags;
6335
6336 sdev = to_scsi_device(dev);
6337 ctrl_info = shost_to_hba(sdev->host);
6338
6339 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6340
6341 device = sdev->hostdata;
Kevin Barnett588a63fe2017-05-03 18:55:25 -05006342 buffer[0] = device->raid_bypass_enabled ? '1' : '0';
Kevin Barnett6c223762016-06-27 16:41:00 -05006343 buffer[1] = '\n';
6344 buffer[2] = '\0';
6345
6346 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6347
6348 return 2;
6349}
6350
Kevin Barnetta9f93392017-05-03 18:55:31 -05006351static ssize_t pqi_raid_level_show(struct device *dev,
6352 struct device_attribute *attr, char *buffer)
6353{
6354 struct pqi_ctrl_info *ctrl_info;
6355 struct scsi_device *sdev;
6356 struct pqi_scsi_dev *device;
6357 unsigned long flags;
6358 char *raid_level;
6359
6360 sdev = to_scsi_device(dev);
6361 ctrl_info = shost_to_hba(sdev->host);
6362
6363 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6364
6365 device = sdev->hostdata;
6366
6367 if (pqi_is_logical_device(device))
6368 raid_level = pqi_raid_level_to_string(device->raid_level);
6369 else
6370 raid_level = "N/A";
6371
6372 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6373
6374 return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
6375}
6376
Dave Carrollcd128242018-12-07 16:28:47 -06006377static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
6378static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
6379static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
Kevin Barnettcbe0c7b2017-05-03 18:53:48 -05006380static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
6381static DEVICE_ATTR(ssd_smart_path_enabled, 0444,
Kevin Barnett6c223762016-06-27 16:41:00 -05006382 pqi_ssd_smart_path_enabled_show, NULL);
Kevin Barnetta9f93392017-05-03 18:55:31 -05006383static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
Kevin Barnett6c223762016-06-27 16:41:00 -05006384
6385static struct device_attribute *pqi_sdev_attrs[] = {
Dave Carrollcd128242018-12-07 16:28:47 -06006386 &dev_attr_lunid,
6387 &dev_attr_unique_id,
6388 &dev_attr_path_info,
Kevin Barnett6c223762016-06-27 16:41:00 -05006389 &dev_attr_sas_address,
6390 &dev_attr_ssd_smart_path_enabled,
Kevin Barnetta9f93392017-05-03 18:55:31 -05006391 &dev_attr_raid_level,
Kevin Barnett6c223762016-06-27 16:41:00 -05006392 NULL
6393};
6394
6395static struct scsi_host_template pqi_driver_template = {
6396 .module = THIS_MODULE,
6397 .name = DRIVER_NAME_SHORT,
6398 .proc_name = DRIVER_NAME_SHORT,
6399 .queuecommand = pqi_scsi_queue_command,
6400 .scan_start = pqi_scan_start,
6401 .scan_finished = pqi_scan_finished,
6402 .this_id = -1,
Kevin Barnett6c223762016-06-27 16:41:00 -05006403 .eh_device_reset_handler = pqi_eh_device_reset_handler,
6404 .ioctl = pqi_ioctl,
6405 .slave_alloc = pqi_slave_alloc,
Christoph Hellwig52198222016-11-01 08:12:49 -06006406 .map_queues = pqi_map_queues,
Kevin Barnett6c223762016-06-27 16:41:00 -05006407 .sdev_attrs = pqi_sdev_attrs,
6408 .shost_attrs = pqi_shost_attrs,
6409};
6410
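/*
 * Allocate a Scsi_Host for this controller, configure its limits from the
 * controller capabilities and register it (plus its SAS transport host)
 * with the SCSI midlayer.
 */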
6411static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
6412{
6413 int rc;
6414 struct Scsi_Host *shost;
6415
6416 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
6417 if (!shost) {
6418 dev_err(&ctrl_info->pci_dev->dev,
6419 "scsi_host_alloc failed for controller %u\n",
6420 ctrl_info->ctrl_id);
6421 return -ENOMEM;
6422 }
6423
6424 shost->io_port = 0;
6425 shost->n_io_port = 0;
6426 shost->this_id = -1;
6427 shost->max_channel = PQI_MAX_BUS;
6428 shost->max_cmd_len = MAX_COMMAND_SIZE;
6429 shost->max_lun = ~0;
6430 shost->max_id = ~0;
6431 shost->max_sectors = ctrl_info->max_sectors;
6432 shost->can_queue = ctrl_info->scsi_ml_can_queue;
6433 shost->cmd_per_lun = shost->can_queue;
6434 shost->sg_tablesize = ctrl_info->sg_tablesize;
6435 shost->transportt = pqi_sas_transport_template;
6436	shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
6437	shost->unique_id = shost->irq;
6438 shost->nr_hw_queues = ctrl_info->num_queue_groups;
6439 shost->hostdata[0] = (unsigned long)ctrl_info;
6440
6441 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
6442 if (rc) {
6443 dev_err(&ctrl_info->pci_dev->dev,
6444 "scsi_add_host failed for controller %u\n",
6445 ctrl_info->ctrl_id);
6446 goto free_host;
6447 }
6448
6449 rc = pqi_add_sas_host(shost, ctrl_info);
6450 if (rc) {
6451 dev_err(&ctrl_info->pci_dev->dev,
6452 "add SAS host failed for controller %u\n",
6453 ctrl_info->ctrl_id);
6454 goto remove_host;
6455 }
6456
6457 ctrl_info->scsi_host = shost;
6458
6459 return 0;
6460
6461remove_host:
6462 scsi_remove_host(shost);
6463free_host:
6464 scsi_host_put(shost);
6465
6466 return rc;
6467}
6468
6469static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
6470{
6471 struct Scsi_Host *shost;
6472
6473 pqi_delete_sas_host(ctrl_info);
6474
6475 shost = ctrl_info->scsi_host;
6476 if (!shost)
6477 return;
6478
6479 scsi_remove_host(shost);
6480 scsi_host_put(shost);
6481}
6482
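/*
 * Poll the PQI device reset register until the firmware reports the reset
 * action as completed.  Bails out early if the controller goes offline or
 * if the firmware-advertised maximum reset timeout (reported in
 * 100-millisecond units) expires.
 */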
6483static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
6484{
6485	int rc = 0;
6486	struct pqi_device_registers __iomem *pqi_registers;
6487	unsigned long timeout;
6488	unsigned int timeout_msecs;
6489	union pqi_reset_register reset_reg;
6490
6491	pqi_registers = ctrl_info->pqi_registers;
6492 timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100;
6493 timeout = msecs_to_jiffies(timeout_msecs) + jiffies;
6494
6495 while (1) {
6496 msleep(PQI_RESET_POLL_INTERVAL_MSECS);
6497 reset_reg.all_bits = readl(&pqi_registers->device_reset);
6498 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
6499 break;
6500 pqi_check_ctrl_health(ctrl_info);
6501 if (pqi_ctrl_offline(ctrl_info)) {
6502 rc = -ENXIO;
6503 break;
6504 }
6505 if (time_after(jiffies, timeout)) {
6506 rc = -ETIMEDOUT;
6507 break;
6508 }
6509 }
6510
6511 return rc;
6512}
6513
6514static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
6515{
6516 int rc;
6517	union pqi_reset_register reset_reg;
6518
6519	if (ctrl_info->pqi_reset_quiesce_supported) {
6520 rc = sis_pqi_reset_quiesce(ctrl_info);
6521 if (rc) {
6522 dev_err(&ctrl_info->pci_dev->dev,
6523 "PQI reset failed during quiesce with error %d\n",
6524 rc);
6525 return rc;
6526 }
6527 }
6528
6529	reset_reg.all_bits = 0;
6530	reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
6531	reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
6532
6533	writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset);
6534
6535 rc = pqi_wait_for_pqi_reset_completion(ctrl_info);
6536	if (rc)
6537		dev_err(&ctrl_info->pci_dev->dev,
6538			"PQI reset failed with error %d\n", rc);
6539
6540 return rc;
6541}
6542
6543static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
6544{
6545 int rc;
6546 struct bmic_identify_controller *identify;
6547
6548 identify = kmalloc(sizeof(*identify), GFP_KERNEL);
6549 if (!identify)
6550 return -ENOMEM;
6551
6552 rc = pqi_identify_controller(ctrl_info, identify);
6553 if (rc)
6554 goto out;
6555
6556 memcpy(ctrl_info->firmware_version, identify->firmware_version,
6557 sizeof(identify->firmware_version));
6558 ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
6559 snprintf(ctrl_info->firmware_version +
6560 strlen(ctrl_info->firmware_version),
6561		sizeof(ctrl_info->firmware_version) - strlen(ctrl_info->firmware_version),
6562 "-%u", get_unaligned_le16(&identify->firmware_build_number));
6563
6564out:
6565 kfree(identify);
6566
6567 return rc;
6568}
6569
6570struct pqi_config_table_section_info {
6571 struct pqi_ctrl_info *ctrl_info;
6572 void *section;
6573 u32 section_offset;
6574 void __iomem *section_iomem_addr;
6575};
6576
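/*
 * As the helpers below assume, the firmware features section of the PQI
 * configuration table carries three byte arrays of num_elements bytes each,
 * laid out back to back starting at features_supported: the features the
 * firmware supports, the features the host is requesting, and the features
 * the firmware has actually enabled.
 */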
6577static inline bool pqi_is_firmware_feature_supported(
6578 struct pqi_config_table_firmware_features *firmware_features,
6579 unsigned int bit_position)
6580{
6581 unsigned int byte_index;
6582
6583 byte_index = bit_position / BITS_PER_BYTE;
6584
6585 if (byte_index >= le16_to_cpu(firmware_features->num_elements))
6586 return false;
6587
6588 return firmware_features->features_supported[byte_index] &
6589 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
6590}
6591
6592static inline bool pqi_is_firmware_feature_enabled(
6593 struct pqi_config_table_firmware_features *firmware_features,
6594 void __iomem *firmware_features_iomem_addr,
6595 unsigned int bit_position)
6596{
6597 unsigned int byte_index;
6598 u8 __iomem *features_enabled_iomem_addr;
6599
6600 byte_index = (bit_position / BITS_PER_BYTE) +
6601 (le16_to_cpu(firmware_features->num_elements) * 2);
6602
6603 features_enabled_iomem_addr = firmware_features_iomem_addr +
6604 offsetof(struct pqi_config_table_firmware_features,
6605 features_supported) + byte_index;
6606
6607 return *((__force u8 *)features_enabled_iomem_addr) &
6608 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
6609}
6610
6611static inline void pqi_request_firmware_feature(
6612 struct pqi_config_table_firmware_features *firmware_features,
6613 unsigned int bit_position)
6614{
6615 unsigned int byte_index;
6616
6617 byte_index = (bit_position / BITS_PER_BYTE) +
6618 le16_to_cpu(firmware_features->num_elements);
6619
6620 firmware_features->features_supported[byte_index] |=
6621 (1 << (bit_position % BITS_PER_BYTE));
6622}
6623
6624static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
6625 u16 first_section, u16 last_section)
6626{
6627 struct pqi_vendor_general_request request;
6628
6629 memset(&request, 0, sizeof(request));
6630
6631 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
6632 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
6633 &request.header.iu_length);
6634 put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE,
6635 &request.function_code);
6636 put_unaligned_le16(first_section,
6637 &request.data.config_table_update.first_section);
6638 put_unaligned_le16(last_section,
6639 &request.data.config_table_update.last_section);
6640
6641 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
6642 0, NULL, NO_TIMEOUT);
6643}
6644
6645static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
6646 struct pqi_config_table_firmware_features *firmware_features,
6647 void __iomem *firmware_features_iomem_addr)
6648{
6649 void *features_requested;
6650 void __iomem *features_requested_iomem_addr;
6651
6652 features_requested = firmware_features->features_supported +
6653 le16_to_cpu(firmware_features->num_elements);
6654
6655 features_requested_iomem_addr = firmware_features_iomem_addr +
6656 (features_requested - (void *)firmware_features);
6657
6658 memcpy_toio(features_requested_iomem_addr, features_requested,
6659 le16_to_cpu(firmware_features->num_elements));
6660
6661 return pqi_config_table_update(ctrl_info,
6662 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
6663 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
6664}
6665
6666struct pqi_firmware_feature {
6667 char *feature_name;
6668 unsigned int feature_bit;
6669 bool supported;
6670 bool enabled;
6671 void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
6672 struct pqi_firmware_feature *firmware_feature);
6673};
6674
6675static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
6676 struct pqi_firmware_feature *firmware_feature)
6677{
6678 if (!firmware_feature->supported) {
6679 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n",
6680 firmware_feature->feature_name);
6681 return;
6682 }
6683
6684 if (firmware_feature->enabled) {
6685 dev_info(&ctrl_info->pci_dev->dev,
6686 "%s enabled\n", firmware_feature->feature_name);
6687 return;
6688 }
6689
6690 dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n",
6691 firmware_feature->feature_name);
6692}
6693
6694static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
6695 struct pqi_firmware_feature *firmware_feature)
6696{
6697 if (firmware_feature->feature_status)
6698 firmware_feature->feature_status(ctrl_info, firmware_feature);
6699}
6700
6701static DEFINE_MUTEX(pqi_firmware_features_mutex);
6702
6703static struct pqi_firmware_feature pqi_firmware_features[] = {
6704 {
6705 .feature_name = "Online Firmware Activation",
6706 .feature_bit = PQI_FIRMWARE_FEATURE_OFA,
6707 .feature_status = pqi_firmware_feature_status,
6708 },
6709 {
6710 .feature_name = "Serial Management Protocol",
6711 .feature_bit = PQI_FIRMWARE_FEATURE_SMP,
6712 .feature_status = pqi_firmware_feature_status,
6713 },
6714	{
6715		.feature_name = "New Soft Reset Handshake",
6716		.feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
6717		.feature_status = pqi_firmware_feature_status,
6718	},
6719};
6720
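/*
 * Negotiate optional features with the firmware: mark which entries of
 * pqi_firmware_features[] the firmware supports, request all supported
 * features via a configuration table update, then read back which ones the
 * firmware enabled and record the result (including whether the soft reset
 * handshake is available).
 */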
6721static void pqi_process_firmware_features(
6722 struct pqi_config_table_section_info *section_info)
6723{
6724 int rc;
6725 struct pqi_ctrl_info *ctrl_info;
6726 struct pqi_config_table_firmware_features *firmware_features;
6727 void __iomem *firmware_features_iomem_addr;
6728 unsigned int i;
6729 unsigned int num_features_supported;
6730
6731 ctrl_info = section_info->ctrl_info;
6732 firmware_features = section_info->section;
6733 firmware_features_iomem_addr = section_info->section_iomem_addr;
6734
6735 for (i = 0, num_features_supported = 0;
6736 i < ARRAY_SIZE(pqi_firmware_features); i++) {
6737 if (pqi_is_firmware_feature_supported(firmware_features,
6738 pqi_firmware_features[i].feature_bit)) {
6739 pqi_firmware_features[i].supported = true;
6740 num_features_supported++;
6741 } else {
6742 pqi_firmware_feature_update(ctrl_info,
6743 &pqi_firmware_features[i]);
6744 }
6745 }
6746
6747 if (num_features_supported == 0)
6748 return;
6749
6750 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6751 if (!pqi_firmware_features[i].supported)
6752 continue;
6753 pqi_request_firmware_feature(firmware_features,
6754 pqi_firmware_features[i].feature_bit);
6755 }
6756
6757 rc = pqi_enable_firmware_features(ctrl_info, firmware_features,
6758 firmware_features_iomem_addr);
6759 if (rc) {
6760 dev_err(&ctrl_info->pci_dev->dev,
6761 "failed to enable firmware features in PQI configuration table\n");
6762 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6763 if (!pqi_firmware_features[i].supported)
6764 continue;
6765 pqi_firmware_feature_update(ctrl_info,
6766 &pqi_firmware_features[i]);
6767 }
6768 return;
6769 }
6770
6771	ctrl_info->soft_reset_handshake_supported = false;
6772	for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6773		if (!pqi_firmware_features[i].supported)
6774			continue;
6775		if (pqi_is_firmware_feature_enabled(firmware_features,
6776			firmware_features_iomem_addr,
6777			pqi_firmware_features[i].feature_bit)) {
6778			pqi_firmware_features[i].enabled = true;
6779			if (pqi_firmware_features[i].feature_bit ==
6780				PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE)
6781				ctrl_info->soft_reset_handshake_supported =
6782					true;
6783		}
6784		pqi_firmware_feature_update(ctrl_info,
6785 &pqi_firmware_features[i]);
6786 }
6787}
6788
6789static void pqi_init_firmware_features(void)
6790{
6791 unsigned int i;
6792
6793 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6794 pqi_firmware_features[i].supported = false;
6795 pqi_firmware_features[i].enabled = false;
6796 }
6797}
6798
6799static void pqi_process_firmware_features_section(
6800 struct pqi_config_table_section_info *section_info)
6801{
6802 mutex_lock(&pqi_firmware_features_mutex);
6803 pqi_init_firmware_features();
6804 pqi_process_firmware_features(section_info);
6805 mutex_unlock(&pqi_firmware_features_mutex);
6806}
6807
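/*
 * Walk the PQI configuration table in controller memory and process the
 * sections this driver cares about: firmware features, the heartbeat
 * counter and the soft reset status register.
 */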
6808static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
6809{
6810 u32 table_length;
6811 u32 section_offset;
6812 void __iomem *table_iomem_addr;
6813 struct pqi_config_table *config_table;
6814 struct pqi_config_table_section_header *section;
6815	struct pqi_config_table_section_info section_info;
6816
6817	table_length = ctrl_info->config_table_length;
6818	if (table_length == 0)
6819		return 0;
6820
6821 config_table = kmalloc(table_length, GFP_KERNEL);
6822 if (!config_table) {
6823 dev_err(&ctrl_info->pci_dev->dev,
6824			"failed to allocate memory for PQI configuration table\n");
6825		return -ENOMEM;
6826 }
6827
6828 /*
6829 * Copy the config table contents from I/O memory space into the
6830 * temporary buffer.
6831 */
6832 table_iomem_addr = ctrl_info->iomem_base +
6833 ctrl_info->config_table_offset;
6834 memcpy_fromio(config_table, table_iomem_addr, table_length);
6835
6836	section_info.ctrl_info = ctrl_info;
6837	section_offset =
6838 get_unaligned_le32(&config_table->first_section_offset);
6839
6840 while (section_offset) {
6841 section = (void *)config_table + section_offset;
6842
6843		section_info.section = section;
6844 section_info.section_offset = section_offset;
6845 section_info.section_iomem_addr =
6846 table_iomem_addr + section_offset;
6847
6848		switch (get_unaligned_le16(&section->section_id)) {
6849		case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
6850 pqi_process_firmware_features_section(&section_info);
6851 break;
6852		case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
6853			if (pqi_disable_heartbeat)
6854 dev_warn(&ctrl_info->pci_dev->dev,
6855 "heartbeat disabled by module parameter\n");
6856 else
6857 ctrl_info->heartbeat_counter =
6858 table_iomem_addr +
6859 section_offset +
6860 offsetof(
6861 struct pqi_config_table_heartbeat,
6862 heartbeat_counter);
6863			break;
6864		case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
6865 ctrl_info->soft_reset_status =
6866 table_iomem_addr +
6867 section_offset +
6868 offsetof(struct pqi_config_table_soft_reset,
6869 soft_reset_status);
6870 break;
6871		}
6872
6873 section_offset =
6874 get_unaligned_le16(&section->next_section_offset);
6875 }
6876
6877 kfree(config_table);
6878
6879 return 0;
6880}
6881
6882/* Switches the controller from PQI mode back into SIS mode. */
6883
6884static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
6885{
6886 int rc;
6887
6888	pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
6889	rc = pqi_reset(ctrl_info);
6890 if (rc)
6891 return rc;
6892	rc = sis_reenable_sis_mode(ctrl_info);
6893 if (rc) {
6894 dev_err(&ctrl_info->pci_dev->dev,
6895 "re-enabling SIS mode failed with error %d\n", rc);
6896 return rc;
6897 }
6898	pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
6899
6900 return 0;
6901}
6902
6903/*
6904 * If the controller isn't already in SIS mode, this function forces it into
6905 * SIS mode.
6906 */
6907
6908static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
6909{
6910 if (!sis_is_firmware_running(ctrl_info))
6911 return -ENXIO;
6912
6913	if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
6914 return 0;
6915
6916 if (sis_is_kernel_up(ctrl_info)) {
6917 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
6918 return 0;
6919	}
6920
6921	return pqi_revert_to_sis_mode(ctrl_info);
6922}
6923
6924static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
6925{
6926 int rc;
6927
6928	rc = pqi_force_sis_mode(ctrl_info);
6929 if (rc)
6930 return rc;
6931
6932 /*
6933 * Wait until the controller is ready to start accepting SIS
6934 * commands.
6935 */
6936 rc = sis_wait_for_ctrl_ready(ctrl_info);
6937	if (rc)
6938		return rc;
6939
6940 /*
6941 * Get the controller properties. This allows us to determine
6942 * whether or not it supports PQI mode.
6943 */
6944 rc = sis_get_ctrl_properties(ctrl_info);
6945 if (rc) {
6946 dev_err(&ctrl_info->pci_dev->dev,
6947 "error obtaining controller properties\n");
6948 return rc;
6949 }
6950
6951 rc = sis_get_pqi_capabilities(ctrl_info);
6952 if (rc) {
6953 dev_err(&ctrl_info->pci_dev->dev,
6954 "error obtaining controller capabilities\n");
6955 return rc;
6956 }
6957
6958	if (reset_devices) {
6959 if (ctrl_info->max_outstanding_requests >
6960 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
6961 ctrl_info->max_outstanding_requests =
6962 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
6963 } else {
6964 if (ctrl_info->max_outstanding_requests >
6965 PQI_MAX_OUTSTANDING_REQUESTS)
6966 ctrl_info->max_outstanding_requests =
6967 PQI_MAX_OUTSTANDING_REQUESTS;
6968 }
6969
6970 pqi_calculate_io_resources(ctrl_info);
6971
6972 rc = pqi_alloc_error_buffer(ctrl_info);
6973 if (rc) {
6974 dev_err(&ctrl_info->pci_dev->dev,
6975 "failed to allocate PQI error buffer\n");
6976 return rc;
6977 }
6978
6979 /*
6980 * If the function we are about to call succeeds, the
6981 * controller will transition from legacy SIS mode
6982 * into PQI mode.
6983 */
6984 rc = sis_init_base_struct_addr(ctrl_info);
6985 if (rc) {
6986 dev_err(&ctrl_info->pci_dev->dev,
6987 "error initializing PQI mode\n");
6988 return rc;
6989 }
6990
6991 /* Wait for the controller to complete the SIS -> PQI transition. */
6992 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
6993 if (rc) {
6994 dev_err(&ctrl_info->pci_dev->dev,
6995 "transition to PQI mode failed\n");
6996 return rc;
6997 }
6998
6999 /* From here on, we are running in PQI mode. */
7000 ctrl_info->pqi_mode_enabled = true;
7001	pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
7002
7003 rc = pqi_alloc_admin_queues(ctrl_info);
7004 if (rc) {
7005 dev_err(&ctrl_info->pci_dev->dev,
7006			"failed to allocate admin queues\n");
7007		return rc;
7008 }
7009
7010 rc = pqi_create_admin_queues(ctrl_info);
7011 if (rc) {
7012 dev_err(&ctrl_info->pci_dev->dev,
7013 "error creating admin queues\n");
7014 return rc;
7015 }
7016
7017 rc = pqi_report_device_capability(ctrl_info);
7018 if (rc) {
7019 dev_err(&ctrl_info->pci_dev->dev,
7020 "obtaining device capability failed\n");
7021 return rc;
7022 }
7023
7024 rc = pqi_validate_device_capability(ctrl_info);
7025 if (rc)
7026 return rc;
7027
7028 pqi_calculate_queue_resources(ctrl_info);
7029
7030 rc = pqi_enable_msix_interrupts(ctrl_info);
7031 if (rc)
7032 return rc;
7033
7034 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
7035 ctrl_info->max_msix_vectors =
7036 ctrl_info->num_msix_vectors_enabled;
7037 pqi_calculate_queue_resources(ctrl_info);
7038 }
7039
7040 rc = pqi_alloc_io_resources(ctrl_info);
7041 if (rc)
7042 return rc;
7043
7044 rc = pqi_alloc_operational_queues(ctrl_info);
7045	if (rc) {
7046		dev_err(&ctrl_info->pci_dev->dev,
7047			"failed to allocate operational queues\n");
7048		return rc;
7049	}
7050
7051 pqi_init_operational_queues(ctrl_info);
7052
7053 rc = pqi_request_irqs(ctrl_info);
7054 if (rc)
7055 return rc;
7056
7057	rc = pqi_create_queues(ctrl_info);
7058 if (rc)
7059 return rc;
7060
7061	pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
7062
7063 ctrl_info->controller_online = true;
7064
7065 rc = pqi_process_config_table(ctrl_info);
7066 if (rc)
7067 return rc;
7068
7069	pqi_start_heartbeat_timer(ctrl_info);
7070
7071	rc = pqi_enable_events(ctrl_info);
7072	if (rc) {
7073 dev_err(&ctrl_info->pci_dev->dev,
7074			"error enabling events\n");
7075		return rc;
7076 }
7077
7078	/* Register with the SCSI subsystem. */
7079 rc = pqi_register_scsi(ctrl_info);
7080 if (rc)
7081 return rc;
7082
7083 rc = pqi_get_ctrl_firmware_version(ctrl_info);
7084 if (rc) {
7085 dev_err(&ctrl_info->pci_dev->dev,
7086 "error obtaining firmware version\n");
7087 return rc;
7088 }
7089
7090	rc = pqi_set_diag_rescan(ctrl_info);
7091 if (rc) {
7092 dev_err(&ctrl_info->pci_dev->dev,
7093 "error enabling multi-lun rescan\n");
7094 return rc;
7095 }
7096
7097	rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
7098 if (rc) {
7099 dev_err(&ctrl_info->pci_dev->dev,
7100 "error updating host wellness\n");
7101 return rc;
7102 }
7103
7104 pqi_schedule_update_time_worker(ctrl_info);
7105
7106 pqi_scan_scsi_devices(ctrl_info);
7107
7108 return 0;
7109}
7110
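/*
 * Reset the cached producer/consumer indices and the corresponding
 * controller registers for the admin, operational and event queues.  Used
 * when re-initializing the controller without reallocating queue memory
 * (resume and OFA restart paths).
 */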
7111static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
7112{
7113 unsigned int i;
7114 struct pqi_admin_queues *admin_queues;
7115 struct pqi_event_queue *event_queue;
7116
7117 admin_queues = &ctrl_info->admin_queues;
7118 admin_queues->iq_pi_copy = 0;
7119 admin_queues->oq_ci_copy = 0;
7120	writel(0, admin_queues->oq_pi);
7121
7122 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
7123 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
7124 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
7125 ctrl_info->queue_groups[i].oq_ci_copy = 0;
7126
7127		writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]);
7128 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]);
7129 writel(0, ctrl_info->queue_groups[i].oq_pi);
7130	}
7131
7132 event_queue = &ctrl_info->event_queue;
7133	writel(0, event_queue->oq_pi);
7134	event_queue->oq_ci_copy = 0;
7135}
7136
7137static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
7138{
7139 int rc;
7140
7141 rc = pqi_force_sis_mode(ctrl_info);
7142 if (rc)
7143 return rc;
7144
7145 /*
7146 * Wait until the controller is ready to start accepting SIS
7147 * commands.
7148 */
7149 rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
7150 if (rc)
7151 return rc;
7152
7153 /*
7154	 * Get the controller properties. This allows us to determine
7155 * whether or not it supports PQI mode.
7156 */
7157 rc = sis_get_ctrl_properties(ctrl_info);
7158 if (rc) {
7159 dev_err(&ctrl_info->pci_dev->dev,
7160 "error obtaining controller properties\n");
7161 return rc;
7162 }
7163
7164 rc = sis_get_pqi_capabilities(ctrl_info);
7165 if (rc) {
7166 dev_err(&ctrl_info->pci_dev->dev,
7167 "error obtaining controller capabilities\n");
7168 return rc;
7169 }
7170
7171 /*
7172	 * If the function we are about to call succeeds, the
7173 * controller will transition from legacy SIS mode
7174 * into PQI mode.
7175 */
7176 rc = sis_init_base_struct_addr(ctrl_info);
7177 if (rc) {
7178 dev_err(&ctrl_info->pci_dev->dev,
7179 "error initializing PQI mode\n");
7180 return rc;
7181 }
7182
7183 /* Wait for the controller to complete the SIS -> PQI transition. */
7184 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
7185 if (rc) {
7186 dev_err(&ctrl_info->pci_dev->dev,
7187 "transition to PQI mode failed\n");
7188 return rc;
7189 }
7190
7191 /* From here on, we are running in PQI mode. */
7192 ctrl_info->pqi_mode_enabled = true;
7193 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
7194
7195 pqi_reinit_queues(ctrl_info);
7196
7197 rc = pqi_create_admin_queues(ctrl_info);
7198 if (rc) {
7199 dev_err(&ctrl_info->pci_dev->dev,
7200 "error creating admin queues\n");
7201 return rc;
7202 }
7203
7204 rc = pqi_create_queues(ctrl_info);
7205 if (rc)
7206 return rc;
7207
7208 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
7209
7210 ctrl_info->controller_online = true;
7211	pqi_ctrl_unblock_requests(ctrl_info);
7212
7213	rc = pqi_process_config_table(ctrl_info);
7214 if (rc)
7215 return rc;
7216
7217 pqi_start_heartbeat_timer(ctrl_info);
7218
7219	rc = pqi_enable_events(ctrl_info);
7220 if (rc) {
7221 dev_err(&ctrl_info->pci_dev->dev,
7222			"error enabling events\n");
7223		return rc;
7224 }
7225
7226	rc = pqi_get_ctrl_firmware_version(ctrl_info);
7227 if (rc) {
7228 dev_err(&ctrl_info->pci_dev->dev,
7229 "error obtaining firmware version\n");
7230 return rc;
7231 }
7232
7233	rc = pqi_set_diag_rescan(ctrl_info);
7234 if (rc) {
7235 dev_err(&ctrl_info->pci_dev->dev,
7236 "error enabling multi-lun rescan\n");
7237 return rc;
7238 }
7239
7240	rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
7241 if (rc) {
7242 dev_err(&ctrl_info->pci_dev->dev,
7243 "error updating host wellness\n");
7244 return rc;
7245 }
7246
7247 pqi_schedule_update_time_worker(ctrl_info);
7248
7249 pqi_scan_scsi_devices(ctrl_info);
7250
7251 return 0;
7252}
7253
7254static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev,
7255 u16 timeout)
7256{
7257 return pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
7258 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
7259}
7260
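/*
 * Basic PCI bring-up: enable the device, set a 64-bit (or 32-bit) DMA mask,
 * map BAR 0, raise the PCIe completion timeout and enable bus mastering.
 */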
7261static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
7262{
7263 int rc;
7264 u64 mask;
7265
7266 rc = pci_enable_device(ctrl_info->pci_dev);
7267 if (rc) {
7268 dev_err(&ctrl_info->pci_dev->dev,
7269 "failed to enable PCI device\n");
7270 return rc;
7271 }
7272
7273 if (sizeof(dma_addr_t) > 4)
7274 mask = DMA_BIT_MASK(64);
7275 else
7276 mask = DMA_BIT_MASK(32);
7277
7278 rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask);
7279 if (rc) {
7280 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
7281 goto disable_device;
7282 }
7283
7284 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
7285 if (rc) {
7286 dev_err(&ctrl_info->pci_dev->dev,
7287 "failed to obtain PCI resources\n");
7288 goto disable_device;
7289 }
7290
7291 ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
7292 ctrl_info->pci_dev, 0),
7293 sizeof(struct pqi_ctrl_registers));
7294 if (!ctrl_info->iomem_base) {
7295 dev_err(&ctrl_info->pci_dev->dev,
7296 "failed to map memory for controller registers\n");
7297 rc = -ENOMEM;
7298 goto release_regions;
7299 }
7300
7301#define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS	0x6
7302
7303 /* Increase the PCIe completion timeout. */
7304 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
7305 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
7306 if (rc) {
7307 dev_err(&ctrl_info->pci_dev->dev,
7308 "failed to set PCIe completion timeout\n");
7309 goto release_regions;
7310 }
7311
7312	/* Enable bus mastering. */
7313 pci_set_master(ctrl_info->pci_dev);
7314
7315	ctrl_info->registers = ctrl_info->iomem_base;
7316 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
7317
7318	pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
7319
7320 return 0;
7321
7322release_regions:
7323 pci_release_regions(ctrl_info->pci_dev);
7324disable_device:
7325 pci_disable_device(ctrl_info->pci_dev);
7326
7327 return rc;
7328}
7329
7330static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
7331{
7332 iounmap(ctrl_info->iomem_base);
7333 pci_release_regions(ctrl_info->pci_dev);
7334	if (pci_is_enabled(ctrl_info->pci_dev))
7335 pci_disable_device(ctrl_info->pci_dev);
7336	pci_set_drvdata(ctrl_info->pci_dev, NULL);
7337}
7338
7339static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
7340{
7341 struct pqi_ctrl_info *ctrl_info;
7342
7343 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
7344 GFP_KERNEL, numa_node);
7345 if (!ctrl_info)
7346 return NULL;
7347
7348 mutex_init(&ctrl_info->scan_mutex);
7349	mutex_init(&ctrl_info->lun_reset_mutex);
7350	mutex_init(&ctrl_info->ofa_mutex);
7351
7352 INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
7353 spin_lock_init(&ctrl_info->scsi_device_list_lock);
7354
7355 INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
7356 atomic_set(&ctrl_info->num_interrupts, 0);
7357
7358 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
7359 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
7360
7361	timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
7362	INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
7363
7364	sema_init(&ctrl_info->sync_request_sem,
7365 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
7366	init_waitqueue_head(&ctrl_info->block_requests_wait);
7367
7368	INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
7369 spin_lock_init(&ctrl_info->raid_bypass_retry_list_lock);
7370 INIT_WORK(&ctrl_info->raid_bypass_retry_work,
7371 pqi_raid_bypass_retry_worker);
7372
7373	ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
7374	ctrl_info->irq_mode = IRQ_MODE_NONE;
7375	ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
7376
7377 return ctrl_info;
7378}
7379
7380static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
7381{
7382 kfree(ctrl_info);
7383}
7384
7385static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
7386{
7387	pqi_free_irqs(ctrl_info);
7388 pqi_disable_msix_interrupts(ctrl_info);
7389}
7390
7391static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
7392{
7393 pqi_stop_heartbeat_timer(ctrl_info);
7394 pqi_free_interrupts(ctrl_info);
7395 if (ctrl_info->queue_memory_base)
7396 dma_free_coherent(&ctrl_info->pci_dev->dev,
7397 ctrl_info->queue_memory_length,
7398 ctrl_info->queue_memory_base,
7399 ctrl_info->queue_memory_base_dma_handle);
7400 if (ctrl_info->admin_queue_memory_base)
7401 dma_free_coherent(&ctrl_info->pci_dev->dev,
7402 ctrl_info->admin_queue_memory_length,
7403 ctrl_info->admin_queue_memory_base,
7404 ctrl_info->admin_queue_memory_base_dma_handle);
7405 pqi_free_all_io_requests(ctrl_info);
7406 if (ctrl_info->error_buffer)
7407 dma_free_coherent(&ctrl_info->pci_dev->dev,
7408 ctrl_info->error_buffer_length,
7409 ctrl_info->error_buffer,
7410 ctrl_info->error_buffer_dma_handle);
7411 if (ctrl_info->iomem_base)
7412 pqi_cleanup_pci_init(ctrl_info);
7413 pqi_free_ctrl_info(ctrl_info);
7414}
7415
7416static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
7417{
7418	pqi_cancel_rescan_worker(ctrl_info);
7419 pqi_cancel_update_time_worker(ctrl_info);
7420	pqi_remove_all_scsi_devices(ctrl_info);
7421 pqi_unregister_scsi(ctrl_info);
7422	if (ctrl_info->pqi_mode_enabled)
7423 pqi_revert_to_sis_mode(ctrl_info);
7424	pqi_free_ctrl_resources(ctrl_info);
7425}
7426
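/*
 * Online Firmware Activation (OFA) support: quiesce all host activity,
 * drain or fail outstanding I/O and drop back to SIS mode so the new
 * firmware can take over, then undo all of that once the controller
 * comes back.
 */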
7427static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
7428{
7429 pqi_cancel_update_time_worker(ctrl_info);
7430 pqi_cancel_rescan_worker(ctrl_info);
7431 pqi_wait_until_lun_reset_finished(ctrl_info);
7432 pqi_wait_until_scan_finished(ctrl_info);
7433 pqi_ctrl_ofa_start(ctrl_info);
7434 pqi_ctrl_block_requests(ctrl_info);
7435 pqi_ctrl_wait_until_quiesced(ctrl_info);
7436 pqi_ctrl_wait_for_pending_io(ctrl_info, PQI_PENDING_IO_TIMEOUT_SECS);
7437 pqi_fail_io_queued_for_all_devices(ctrl_info);
7438 pqi_wait_until_inbound_queues_empty(ctrl_info);
7439 pqi_stop_heartbeat_timer(ctrl_info);
7440 ctrl_info->pqi_mode_enabled = false;
7441 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
7442}
7443
7444static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
7445{
7446 pqi_ofa_free_host_buffer(ctrl_info);
7447 ctrl_info->pqi_mode_enabled = true;
7448 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
7449 ctrl_info->controller_online = true;
7450 pqi_ctrl_unblock_requests(ctrl_info);
7451 pqi_start_heartbeat_timer(ctrl_info);
7452 pqi_schedule_update_time_worker(ctrl_info);
7453 pqi_clear_soft_reset_status(ctrl_info,
7454 PQI_SOFT_RESET_ABORT);
7455 pqi_scan_scsi_devices(ctrl_info);
7456}
7457
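/*
 * Allocate the OFA host buffer as sg_count coherent DMA chunks of
 * chunk_size bytes each and describe them in the sg_descriptor array of
 * the pqi_ofa_memory structure.  Frees any chunks already allocated and
 * returns -ENOMEM if an allocation fails.
 */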
7458static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info,
7459 u32 total_size, u32 chunk_size)
7460{
7461 u32 sg_count;
7462 u32 size;
7463 int i;
7464 struct pqi_sg_descriptor *mem_descriptor = NULL;
7465 struct device *dev;
7466 struct pqi_ofa_memory *ofap;
7467
7468 dev = &ctrl_info->pci_dev->dev;
7469
7470 sg_count = (total_size + chunk_size - 1);
7471 do_div(sg_count, chunk_size);
7472
7473 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
7474
7475 if (sg_count*chunk_size < total_size)
7476 goto out;
7477
7478 ctrl_info->pqi_ofa_chunk_virt_addr =
7479 kcalloc(sg_count, sizeof(void *), GFP_KERNEL);
7480 if (!ctrl_info->pqi_ofa_chunk_virt_addr)
7481 goto out;
7482
7483 for (size = 0, i = 0; size < total_size; size += chunk_size, i++) {
7484 dma_addr_t dma_handle;
7485
7486 ctrl_info->pqi_ofa_chunk_virt_addr[i] =
7487 dma_zalloc_coherent(dev, chunk_size, &dma_handle,
7488 GFP_KERNEL);
7489
7490 if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
7491 break;
7492
7493 mem_descriptor = &ofap->sg_descriptor[i];
7494 put_unaligned_le64 ((u64) dma_handle, &mem_descriptor->address);
7495 put_unaligned_le32 (chunk_size, &mem_descriptor->length);
7496 }
7497
7498 if (!size || size < total_size)
7499 goto out_free_chunks;
7500
7501 put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
7502 put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
7503 put_unaligned_le32(size, &ofap->bytes_allocated);
7504
7505 return 0;
7506
7507out_free_chunks:
7508 while (--i >= 0) {
7509 mem_descriptor = &ofap->sg_descriptor[i];
7510 dma_free_coherent(dev, chunk_size,
7511 ctrl_info->pqi_ofa_chunk_virt_addr[i],
7512 get_unaligned_le64(&mem_descriptor->address));
7513 }
7514 kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
7515
7516out:
7517 put_unaligned_le32 (0, &ofap->bytes_allocated);
7518 return -ENOMEM;
7519}
7520
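/*
 * Try to allocate the requested OFA buffer with progressively smaller
 * chunk sizes, halving on each failure, but never below
 * total_size / PQI_OFA_MAX_SG_DESCRIPTORS.
 */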
7521static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
7522{
7523 u32 total_size;
7524 u32 min_chunk_size;
7525 u32 chunk_sz;
7526
7527 total_size = le32_to_cpu(
7528 ctrl_info->pqi_ofa_mem_virt_addr->bytes_allocated);
7529 min_chunk_size = total_size / PQI_OFA_MAX_SG_DESCRIPTORS;
7530
7531 for (chunk_sz = total_size; chunk_sz >= min_chunk_size; chunk_sz /= 2)
7532 if (!pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_sz))
7533 return 0;
7534
7535 return -ENOMEM;
7536}
7537
7538static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
7539 u32 bytes_requested)
7540{
7541 struct pqi_ofa_memory *pqi_ofa_memory;
7542 struct device *dev;
7543
7544 dev = &ctrl_info->pci_dev->dev;
7545 pqi_ofa_memory = dma_zalloc_coherent(dev,
7546 PQI_OFA_MEMORY_DESCRIPTOR_LENGTH,
7547 &ctrl_info->pqi_ofa_mem_dma_handle,
7548 GFP_KERNEL);
7549
7550 if (!pqi_ofa_memory)
7551 return;
7552
7553 put_unaligned_le16(PQI_OFA_VERSION, &pqi_ofa_memory->version);
7554 memcpy(&pqi_ofa_memory->signature, PQI_OFA_SIGNATURE,
7555 sizeof(pqi_ofa_memory->signature));
7556 pqi_ofa_memory->bytes_allocated = cpu_to_le32(bytes_requested);
7557
7558 ctrl_info->pqi_ofa_mem_virt_addr = pqi_ofa_memory;
7559
7560 if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
7561 dev_err(dev, "Failed to allocate host buffer of size = %u",
7562 bytes_requested);
7563 }
7564}
7565
7566static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
7567{
7568 int i;
7569 struct pqi_sg_descriptor *mem_descriptor;
7570 struct pqi_ofa_memory *ofap;
7571
7572 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
7573
7574 if (!ofap)
7575 return;
7576
7577 if (!ofap->bytes_allocated)
7578 goto out;
7579
7580 mem_descriptor = ofap->sg_descriptor;
7581
7582 for (i = 0; i < get_unaligned_le16(&ofap->num_memory_descriptors);
7583 i++) {
7584 dma_free_coherent(&ctrl_info->pci_dev->dev,
7585 get_unaligned_le32(&mem_descriptor[i].length),
7586 ctrl_info->pqi_ofa_chunk_virt_addr[i],
7587 get_unaligned_le64(&mem_descriptor[i].address));
7588 }
7589 kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
7590
7591out:
7592 dma_free_coherent(&ctrl_info->pci_dev->dev,
7593 PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, ofap,
7594 ctrl_info->pqi_ofa_mem_dma_handle);
7595 ctrl_info->pqi_ofa_mem_virt_addr = NULL;
7596}
7597
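/*
 * Tell the firmware where the OFA host buffer lives via a vendor general
 * request.  If no buffer was allocated, the buffer address and length are
 * left at zero.
 */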
7598static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
7599{
7600 struct pqi_vendor_general_request request;
7601 size_t size;
7602 struct pqi_ofa_memory *ofap;
7603
7604 memset(&request, 0, sizeof(request));
7605
7606 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
7607
7608 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
7609 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
7610 &request.header.iu_length);
7611 put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
7612 &request.function_code);
7613
7614 if (ofap) {
7615 size = offsetof(struct pqi_ofa_memory, sg_descriptor) +
7616 get_unaligned_le16(&ofap->num_memory_descriptors) *
7617 sizeof(struct pqi_sg_descriptor);
7618
7619 put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
7620 &request.data.ofa_memory_allocation.buffer_address);
7621 put_unaligned_le32(size,
7622 &request.data.ofa_memory_allocation.buffer_length);
7623
7624 }
7625
7626 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
7627 0, NULL, NO_TIMEOUT);
7628}
7629
7630#define PQI_POST_RESET_DELAY_B4_MSGU_READY 5000
7631
7632static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info)
7633{
7634 msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
7635 return pqi_ctrl_init_resume(ctrl_info);
7636}
7637
7638static void pqi_perform_lockup_action(void)
7639{
7640 switch (pqi_lockup_action) {
7641 case PANIC:
7642 panic("FATAL: Smart Family Controller lockup detected");
7643 break;
7644 case REBOOT:
7645 emergency_restart();
7646 break;
7647 case NONE:
7648 default:
7649 break;
7650 }
7651}
7652
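/*
 * When the controller is taken offline, outstanding SCSI commands are
 * completed with DID_NO_CONNECT; internal requests get -ENXIO along with
 * the canned check-condition error info below.
 */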
7653static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
7654 .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
7655 .status = SAM_STAT_CHECK_CONDITION,
7656};
7657
7658static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
7659{
7660	unsigned int i;
7661	struct pqi_io_request *io_request;
7662	struct scsi_cmnd *scmd;
7663
7664	for (i = 0; i < ctrl_info->max_io_slots; i++) {
7665 io_request = &ctrl_info->io_request_pool[i];
7666 if (atomic_read(&io_request->refcount) == 0)
7667 continue;
7668
7669		scmd = io_request->scmd;
7670 if (scmd) {
7671 set_host_byte(scmd, DID_NO_CONNECT);
7672 } else {
7673 io_request->status = -ENXIO;
7674 io_request->error_info =
7675 &pqi_ctrl_offline_raid_error_info;
7676		}
7677
7678 io_request->io_complete_callback(io_request,
7679 io_request->context);
7680	}
7681}
7682
7683static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
7684{
7685	pqi_perform_lockup_action();
7686 pqi_stop_heartbeat_timer(ctrl_info);
7687 pqi_free_interrupts(ctrl_info);
7688 pqi_cancel_rescan_worker(ctrl_info);
7689 pqi_cancel_update_time_worker(ctrl_info);
7690 pqi_ctrl_wait_until_quiesced(ctrl_info);
7691 pqi_fail_all_outstanding_requests(ctrl_info);
7692 pqi_clear_all_queued_raid_bypass_retries(ctrl_info);
7693 pqi_ctrl_unblock_requests(ctrl_info);
7694}
7695
7696static void pqi_ctrl_offline_worker(struct work_struct *work)
7697{
7698 struct pqi_ctrl_info *ctrl_info;
7699
7700 ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
7701 pqi_take_ctrl_offline_deferred(ctrl_info);
7702}
7703
7704static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
7705{
7706	if (!ctrl_info->controller_online)
7707 return;
7708
7709	ctrl_info->controller_online = false;
7710	ctrl_info->pqi_mode_enabled = false;
7711 pqi_ctrl_block_requests(ctrl_info);
7712	if (!pqi_disable_ctrl_shutdown)
7713 sis_shutdown_ctrl(ctrl_info);
7714	pci_disable_device(ctrl_info->pci_dev);
7715 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
7716	schedule_work(&ctrl_info->ctrl_offline_work);
7717}
7718
7719static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
7720	const struct pci_device_id *id)
7721{
7722 char *ctrl_description;
7723
7724	if (id->driver_data)
7725		ctrl_description = (char *)id->driver_data;
7726	else
7727 ctrl_description = "Microsemi Smart Family Controller";
7728
7729	dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
7730}
7731
7732static int pqi_pci_probe(struct pci_dev *pci_dev,
7733	const struct pci_device_id *id)
7734{
7735 int rc;
7736	int node, cp_node;
7737	struct pqi_ctrl_info *ctrl_info;
7738
7739	pqi_print_ctrl_info(pci_dev, id);
7740
7741 if (pqi_disable_device_id_wildcards &&
7742 id->subvendor == PCI_ANY_ID &&
7743 id->subdevice == PCI_ANY_ID) {
7744		dev_warn(&pci_dev->dev,
7745			"controller not probed because device ID wildcards are disabled\n");
7746 return -ENODEV;
7747 }
7748
7749 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
7750		dev_warn(&pci_dev->dev,
7751			"controller device ID matched using wildcards\n");
7752
7753	node = dev_to_node(&pci_dev->dev);
7754	if (node == NUMA_NO_NODE) {
7755 cp_node = cpu_to_node(0);
7756 if (cp_node == NUMA_NO_NODE)
7757 cp_node = 0;
7758 set_dev_node(&pci_dev->dev, cp_node);
7759 }
7760
7761 ctrl_info = pqi_alloc_ctrl_info(node);
7762 if (!ctrl_info) {
7763		dev_err(&pci_dev->dev,
7764			"failed to allocate controller info block\n");
7765 return -ENOMEM;
7766 }
7767
7768	ctrl_info->pci_dev = pci_dev;
7769
7770 rc = pqi_pci_init(ctrl_info);
7771 if (rc)
7772 goto error;
7773
7774 rc = pqi_ctrl_init(ctrl_info);
7775 if (rc)
7776 goto error;
7777
7778 return 0;
7779
7780error:
7781 pqi_remove_ctrl(ctrl_info);
7782
7783 return rc;
7784}
7785
7786static void pqi_pci_remove(struct pci_dev *pci_dev)
7787{
7788 struct pqi_ctrl_info *ctrl_info;
7789
7790	ctrl_info = pci_get_drvdata(pci_dev);
7791	if (!ctrl_info)
7792 return;
7793
7794	ctrl_info->in_shutdown = true;
7795
7796	pqi_remove_ctrl(ctrl_info);
7797}
7798
7799static void pqi_shutdown(struct pci_dev *pci_dev)
7800{
7801 int rc;
7802 struct pqi_ctrl_info *ctrl_info;
7803
7804	ctrl_info = pci_get_drvdata(pci_dev);
7805	if (!ctrl_info)
7806 goto error;
7807
7808 /*
7809 * Write all data in the controller's battery-backed cache to
7810 * storage.
7811 */
7812	rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
7813	pqi_reset(ctrl_info);
7814	if (rc == 0)
7815 return;
7816
7817error:
7818	dev_warn(&pci_dev->dev,
7819		"unable to flush controller cache\n");
7820}
7821
7822static void pqi_process_lockup_action_param(void)
7823{
7824 unsigned int i;
7825
7826 if (!pqi_lockup_action_param)
7827 return;
7828
7829 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
7830 if (strcmp(pqi_lockup_action_param,
7831 pqi_lockup_actions[i].name) == 0) {
7832 pqi_lockup_action = pqi_lockup_actions[i].action;
7833 return;
7834 }
7835 }
7836
7837 pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
7838 DRIVER_NAME_SHORT, pqi_lockup_action_param);
7839}
7840
7841static void pqi_process_module_params(void)
7842{
7843 pqi_process_lockup_action_param();
7844}
7845
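/*
 * Power management: quiesce the controller and flush its cache before
 * suspend; on resume either re-arm a legacy INTx interrupt and unblock
 * requests (when the device never left D0) or fully re-initialize the
 * controller.
 */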
7846static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state)
7847{
7848 struct pqi_ctrl_info *ctrl_info;
7849
7850 ctrl_info = pci_get_drvdata(pci_dev);
7851
7852 pqi_disable_events(ctrl_info);
7853 pqi_cancel_update_time_worker(ctrl_info);
7854 pqi_cancel_rescan_worker(ctrl_info);
7855 pqi_wait_until_scan_finished(ctrl_info);
7856 pqi_wait_until_lun_reset_finished(ctrl_info);
7857	pqi_wait_until_ofa_finished(ctrl_info);
7858	pqi_flush_cache(ctrl_info, SUSPEND);
7859	pqi_ctrl_block_requests(ctrl_info);
7860 pqi_ctrl_wait_until_quiesced(ctrl_info);
7861 pqi_wait_until_inbound_queues_empty(ctrl_info);
7862	pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
7863	pqi_stop_heartbeat_timer(ctrl_info);
7864
7865 if (state.event == PM_EVENT_FREEZE)
7866 return 0;
7867
7868 pci_save_state(pci_dev);
7869 pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
7870
7871 ctrl_info->controller_online = false;
7872 ctrl_info->pqi_mode_enabled = false;
7873
7874 return 0;
7875}
7876
7877static __maybe_unused int pqi_resume(struct pci_dev *pci_dev)
7878{
7879 int rc;
7880 struct pqi_ctrl_info *ctrl_info;
7881
7882 ctrl_info = pci_get_drvdata(pci_dev);
7883
7884 if (pci_dev->current_state != PCI_D0) {
7885 ctrl_info->max_hw_queue_index = 0;
7886 pqi_free_interrupts(ctrl_info);
7887 pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);
7888 rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler,
7889 IRQF_SHARED, DRIVER_NAME_SHORT,
7890 &ctrl_info->queue_groups[0]);
7891 if (rc) {
7892 dev_err(&ctrl_info->pci_dev->dev,
7893 "irq %u init failed with error %d\n",
7894 pci_dev->irq, rc);
7895 return rc;
7896 }
7897 pqi_start_heartbeat_timer(ctrl_info);
7898 pqi_ctrl_unblock_requests(ctrl_info);
7899 return 0;
7900 }
7901
7902 pci_set_power_state(pci_dev, PCI_D0);
7903 pci_restore_state(pci_dev);
7904
7905 return pqi_ctrl_init_resume(ctrl_info);
7906}
7907
7908/* Define the PCI IDs for the controllers that we support. */
7909static const struct pci_device_id pqi_pci_id_table[] = {
7910 {
7911 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7912		0x105b, 0x1211)
7913 },
7914 {
7915 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7916 0x105b, 0x1321)
7917 },
7918 {
7919 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7920		0x152d, 0x8a22)
7921 },
7922 {
7923 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7924 0x152d, 0x8a23)
7925 },
7926 {
7927 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7928 0x152d, 0x8a24)
7929 },
7930 {
7931 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7932 0x152d, 0x8a36)
7933 },
7934 {
7935 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7936 0x152d, 0x8a37)
7937 },
7938 {
7939 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7940		0x193d, 0x8460)
7941 },
7942 {
7943 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7944 0x193d, 0x8461)
7945 },
7946 {
7947 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7948		0x193d, 0xc460)
7949 },
7950 {
7951 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7952 0x193d, 0xc461)
7953 },
7954 {
7955 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7956		0x193d, 0xf460)
7957 },
7958 {
7959 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7960 0x193d, 0xf461)
7961 },
7962 {
7963 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7964 0x1bd4, 0x0045)
7965 },
7966 {
7967 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7968 0x1bd4, 0x0046)
7969 },
7970 {
7971 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7972 0x1bd4, 0x0047)
7973 },
7974 {
7975 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7976 0x1bd4, 0x0048)
7977 },
7978 {
7979 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7980		0x1bd4, 0x004a)
7981 },
7982 {
7983 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7984 0x1bd4, 0x004b)
7985 },
7986 {
7987 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7988 0x1bd4, 0x004c)
7989 },
7990 {
7991 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7992		0x19e5, 0xd227)
7993 },
7994 {
7995 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7996 0x19e5, 0xd228)
7997 },
7998 {
7999 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8000 0x19e5, 0xd229)
8001 },
8002 {
8003 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8004 0x19e5, 0xd22a)
8005 },
8006 {
8007 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8008 0x19e5, 0xd22b)
8009 },
8010 {
8011 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8012 0x19e5, 0xd22c)
8013 },
8014 {
8015 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8016		PCI_VENDOR_ID_ADAPTEC2, 0x0110)
8017 },
8018 {
8019 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8020		PCI_VENDOR_ID_ADAPTEC2, 0x0608)
8021	},
8022 {
8023 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8024 PCI_VENDOR_ID_ADAPTEC2, 0x0800)
8025 },
8026 {
8027 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8028 PCI_VENDOR_ID_ADAPTEC2, 0x0801)
8029 },
8030 {
8031 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8032 PCI_VENDOR_ID_ADAPTEC2, 0x0802)
8033 },
8034 {
8035 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8036 PCI_VENDOR_ID_ADAPTEC2, 0x0803)
8037 },
8038 {
8039 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8040 PCI_VENDOR_ID_ADAPTEC2, 0x0804)
8041 },
8042 {
8043 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8044 PCI_VENDOR_ID_ADAPTEC2, 0x0805)
8045 },
8046 {
8047 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8048		PCI_VENDOR_ID_ADAPTEC2, 0x0806)
8049 },
8050 {
8051 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8052		PCI_VENDOR_ID_ADAPTEC2, 0x0807)
8053 },
8054 {
8055 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8056		PCI_VENDOR_ID_ADAPTEC2, 0x0900)
8057 },
8058 {
8059 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8060 PCI_VENDOR_ID_ADAPTEC2, 0x0901)
8061 },
8062 {
8063 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8064 PCI_VENDOR_ID_ADAPTEC2, 0x0902)
8065 },
8066 {
8067 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8068 PCI_VENDOR_ID_ADAPTEC2, 0x0903)
8069 },
8070 {
8071 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8072 PCI_VENDOR_ID_ADAPTEC2, 0x0904)
8073 },
8074 {
8075 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8076 PCI_VENDOR_ID_ADAPTEC2, 0x0905)
8077 },
8078 {
8079 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8080 PCI_VENDOR_ID_ADAPTEC2, 0x0906)
8081 },
8082 {
8083 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett7eddabf2017-05-03 18:53:54 -05008084 PCI_VENDOR_ID_ADAPTEC2, 0x0907)
8085 },
8086 {
8087 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8088 PCI_VENDOR_ID_ADAPTEC2, 0x0908)
8089 },
8090 {
8091 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett55790062017-08-10 13:47:09 -05008092 PCI_VENDOR_ID_ADAPTEC2, 0x090a)
8093 },
8094 {
8095 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett7eddabf2017-05-03 18:53:54 -05008096 PCI_VENDOR_ID_ADAPTEC2, 0x1200)
8097 },
8098 {
8099 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8100 PCI_VENDOR_ID_ADAPTEC2, 0x1201)
8101 },
8102 {
8103 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8104 PCI_VENDOR_ID_ADAPTEC2, 0x1202)
8105 },
8106 {
8107 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8108 PCI_VENDOR_ID_ADAPTEC2, 0x1280)
8109 },
8110 {
8111 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8112 PCI_VENDOR_ID_ADAPTEC2, 0x1281)
8113 },
8114 {
8115 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnettb0f94082018-03-05 09:01:00 -06008116 PCI_VENDOR_ID_ADAPTEC2, 0x1282)
8117 },
8118 {
8119 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett7eddabf2017-05-03 18:53:54 -05008120 PCI_VENDOR_ID_ADAPTEC2, 0x1300)
8121 },
8122 {
8123 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8124 PCI_VENDOR_ID_ADAPTEC2, 0x1301)
8125 },
8126 {
8127 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnettbd809e82017-09-27 16:29:59 -05008128 PCI_VENDOR_ID_ADAPTEC2, 0x1302)
8129 },
8130 {
8131 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8132 PCI_VENDOR_ID_ADAPTEC2, 0x1303)
8133 },
8134 {
8135 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett7eddabf2017-05-03 18:53:54 -05008136 PCI_VENDOR_ID_ADAPTEC2, 0x1380)
8137 },
8138 {
8139 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett9f8d05f2018-06-18 13:22:54 -05008140 PCI_VENDOR_ID_ADVANTECH, 0x8312)
8141 },
8142 {
8143 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett55790062017-08-10 13:47:09 -05008144 PCI_VENDOR_ID_DELL, 0x1fe0)
8145 },
8146 {
8147 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett7eddabf2017-05-03 18:53:54 -05008148 PCI_VENDOR_ID_HP, 0x0600)
8149 },
8150 {
8151 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8152 PCI_VENDOR_ID_HP, 0x0601)
8153 },
8154 {
8155 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8156 PCI_VENDOR_ID_HP, 0x0602)
8157 },
8158 {
8159 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8160 PCI_VENDOR_ID_HP, 0x0603)
8161 },
8162 {
8163 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett55790062017-08-10 13:47:09 -05008164 PCI_VENDOR_ID_HP, 0x0609)
Kevin Barnett7eddabf2017-05-03 18:53:54 -05008165 },
8166 {
8167 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8168 PCI_VENDOR_ID_HP, 0x0650)
8169 },
8170 {
8171 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8172 PCI_VENDOR_ID_HP, 0x0651)
8173 },
8174 {
8175 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8176 PCI_VENDOR_ID_HP, 0x0652)
8177 },
8178 {
8179 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8180 PCI_VENDOR_ID_HP, 0x0653)
8181 },
8182 {
8183 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8184 PCI_VENDOR_ID_HP, 0x0654)
8185 },
8186 {
8187 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8188 PCI_VENDOR_ID_HP, 0x0655)
8189 },
8190 {
8191 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett7eddabf2017-05-03 18:53:54 -05008192 PCI_VENDOR_ID_HP, 0x0700)
8193 },
8194 {
8195 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8196 PCI_VENDOR_ID_HP, 0x0701)
8197 },
8198 {
8199 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett6c223762016-06-27 16:41:00 -05008200 PCI_VENDOR_ID_HP, 0x1001)
8201 },
8202 {
8203 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8204 PCI_VENDOR_ID_HP, 0x1100)
8205 },
8206 {
8207 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8208 PCI_VENDOR_ID_HP, 0x1101)
8209 },
8210 {
8211 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett6c223762016-06-27 16:41:00 -05008212 PCI_ANY_ID, PCI_ANY_ID)
8213 },
8214 { 0 }
8215};
8216
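/*
 * Each PCI_DEVICE_SUB() entry above fills in the .vendor, .device,
 * .subvendor and .subdevice fields of a struct pci_device_id (see
 * <linux/pci.h>).  The table ends with a PCI_ANY_ID/PCI_ANY_ID catch-all
 * for device 0x028f plus the mandatory all-zero terminator, and
 * MODULE_DEVICE_TABLE() exports it via the module alias information so
 * udev/modprobe can autoload the driver when a matching controller is
 * found.
 */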
8217MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
8218
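/*
 * PCI core entry points.  The legacy .suspend/.resume hooks in
 * struct pci_driver are compiled in only when CONFIG_PM is enabled;
 * everything else is registered unconditionally.
 */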
8219static struct pci_driver pqi_pci_driver = {
8220 .name = DRIVER_NAME_SHORT,
8221 .id_table = pqi_pci_id_table,
8222 .probe = pqi_pci_probe,
8223 .remove = pqi_pci_remove,
8224 .shutdown = pqi_shutdown,
Kevin Barnett061ef062017-05-03 18:53:05 -05008225#if defined(CONFIG_PM)
8226 .suspend = pqi_suspend,
8227 .resume = pqi_resume,
8228#endif
Kevin Barnett6c223762016-06-27 16:41:00 -05008229};
8230
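/*
 * Module load: the SAS transport class is attached first so that
 * pqi_sas_transport_template is valid before any controller can be
 * probed, then module parameters are processed and the driver is
 * registered with the PCI core.  If pci_register_driver() fails, the
 * transport reference is released again so the module unloads cleanly.
 * pqi_cleanup() tears things down in the reverse order.
 */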
8231static int __init pqi_init(void)
8232{
8233 int rc;
8234
8235 pr_info(DRIVER_NAME "\n");
8236
8237 pqi_sas_transport_template =
8238 sas_attach_transport(&pqi_sas_transport_functions);
8239 if (!pqi_sas_transport_template)
8240 return -ENODEV;
8241
Kevin Barnett3c509762017-05-03 18:54:37 -05008242 pqi_process_module_params();
8243
Kevin Barnett6c223762016-06-27 16:41:00 -05008244 rc = pci_register_driver(&pqi_pci_driver);
8245 if (rc)
8246 sas_release_transport(pqi_sas_transport_template);
8247
8248 return rc;
8249}
8250
8251static void __exit pqi_cleanup(void)
8252{
8253 pci_unregister_driver(&pqi_pci_driver);
8254 sas_release_transport(pqi_sas_transport_template);
8255}
8256
8257module_init(pqi_init);
8258module_exit(pqi_cleanup);
8259
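/*
 * verify_structures() is never called at run time (hence the "unused"
 * attribute); it exists only so the BUILD_BUG_ON() assertions below are
 * evaluated at compile time.  Each assertion pins a field of one of the
 * packed on-the-wire structures from smartpqi.h to the byte offset the
 * controller interface expects, so an accidental layout change breaks
 * the build instead of corrupting requests at run time.
 */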
8260static void __attribute__((unused)) verify_structures(void)
8261{
8262 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8263 sis_host_to_ctrl_doorbell) != 0x20);
8264 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8265 sis_interrupt_mask) != 0x34);
8266 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8267 sis_ctrl_to_host_doorbell) != 0x9c);
8268 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8269 sis_ctrl_to_host_doorbell_clear) != 0xa0);
8270 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
Kevin Barnettff6abb72016-08-31 14:54:41 -05008271 sis_driver_scratch) != 0xb0);
8272 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
Kevin Barnett6c223762016-06-27 16:41:00 -05008273 sis_firmware_status) != 0xbc);
8274 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8275 sis_mailbox) != 0x1000);
8276 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8277 pqi_registers) != 0x4000);
8278
8279 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
8280 iu_type) != 0x0);
8281 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
8282 iu_length) != 0x2);
8283 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
8284 response_queue_id) != 0x4);
8285 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
8286 work_area) != 0x6);
8287 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
8288
8289 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8290 status) != 0x0);
8291 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8292 service_response) != 0x1);
8293 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8294 data_present) != 0x2);
8295 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8296 reserved) != 0x3);
8297 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8298 residual_count) != 0x4);
8299 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8300 data_length) != 0x8);
8301 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8302 reserved1) != 0xa);
8303 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8304 data) != 0xc);
8305 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
8306
8307 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8308 data_in_result) != 0x0);
8309 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8310 data_out_result) != 0x1);
8311 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8312 reserved) != 0x2);
8313 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8314 status) != 0x5);
8315 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8316 status_qualifier) != 0x6);
8317 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8318 sense_data_length) != 0x8);
8319 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8320 response_data_length) != 0xa);
8321 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8322 data_in_transferred) != 0xc);
8323 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8324 data_out_transferred) != 0x10);
8325 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8326 data) != 0x14);
8327 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
8328
8329 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8330 signature) != 0x0);
8331 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8332 function_and_status_code) != 0x8);
8333 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8334 max_admin_iq_elements) != 0x10);
8335 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8336 max_admin_oq_elements) != 0x11);
8337 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8338 admin_iq_element_length) != 0x12);
8339 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8340 admin_oq_element_length) != 0x13);
8341 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8342 max_reset_timeout) != 0x14);
8343 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8344 legacy_intx_status) != 0x18);
8345 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8346 legacy_intx_mask_set) != 0x1c);
8347 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8348 legacy_intx_mask_clear) != 0x20);
8349 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8350 device_status) != 0x40);
8351 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8352 admin_iq_pi_offset) != 0x48);
8353 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8354 admin_oq_ci_offset) != 0x50);
8355 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8356 admin_iq_element_array_addr) != 0x58);
8357 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8358 admin_oq_element_array_addr) != 0x60);
8359 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8360 admin_iq_ci_addr) != 0x68);
8361 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8362 admin_oq_pi_addr) != 0x70);
8363 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8364 admin_iq_num_elements) != 0x78);
8365 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8366 admin_oq_num_elements) != 0x79);
8367 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8368 admin_queue_int_msg_num) != 0x7a);
8369 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8370 device_error) != 0x80);
8371 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8372 error_details) != 0x88);
8373 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8374 device_reset) != 0x90);
8375 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8376 power_action) != 0x94);
8377 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
8378
8379 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8380 header.iu_type) != 0);
8381 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8382 header.iu_length) != 2);
8383 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8384 header.work_area) != 6);
8385 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8386 request_id) != 8);
8387 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8388 function_code) != 10);
8389 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8390 data.report_device_capability.buffer_length) != 44);
8391 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8392 data.report_device_capability.sg_descriptor) != 48);
8393 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8394 data.create_operational_iq.queue_id) != 12);
8395 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8396 data.create_operational_iq.element_array_addr) != 16);
8397 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8398 data.create_operational_iq.ci_addr) != 24);
8399 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8400 data.create_operational_iq.num_elements) != 32);
8401 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8402 data.create_operational_iq.element_length) != 34);
8403 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8404 data.create_operational_iq.queue_protocol) != 36);
8405 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8406 data.create_operational_oq.queue_id) != 12);
8407 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8408 data.create_operational_oq.element_array_addr) != 16);
8409 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8410 data.create_operational_oq.pi_addr) != 24);
8411 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8412 data.create_operational_oq.num_elements) != 32);
8413 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8414 data.create_operational_oq.element_length) != 34);
8415 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8416 data.create_operational_oq.queue_protocol) != 36);
8417 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8418 data.create_operational_oq.int_msg_num) != 40);
8419 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8420 data.create_operational_oq.coalescing_count) != 42);
8421 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8422 data.create_operational_oq.min_coalescing_time) != 44);
8423 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8424 data.create_operational_oq.max_coalescing_time) != 48);
8425 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8426 data.delete_operational_queue.queue_id) != 12);
8427 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
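	/*
	 * The request-specific data area starts at byte 11 (8-byte header,
	 * 16-bit request_id, 8-bit function_code), so each variant must fill
	 * the remaining 64 - 11 = 53 bytes of the admin IU.
	 */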
8428 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
8429 data.create_operational_iq) != 64 - 11);
8430 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
8431 data.create_operational_oq) != 64 - 11);
8432 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
8433 data.delete_operational_queue) != 64 - 11);
8434
8435 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8436 header.iu_type) != 0);
8437 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8438 header.iu_length) != 2);
8439 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8440 header.work_area) != 6);
8441 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8442 request_id) != 8);
8443 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8444 function_code) != 10);
8445 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8446 status) != 11);
8447 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8448 data.create_operational_iq.status_descriptor) != 12);
8449 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8450 data.create_operational_iq.iq_pi_offset) != 16);
8451 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8452 data.create_operational_oq.status_descriptor) != 12);
8453 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8454 data.create_operational_oq.oq_ci_offset) != 16);
8455 BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
8456
8457 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8458 header.iu_type) != 0);
8459 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8460 header.iu_length) != 2);
8461 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8462 header.response_queue_id) != 4);
8463 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8464 header.work_area) != 6);
8465 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8466 request_id) != 8);
8467 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8468 nexus_id) != 10);
8469 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8470 buffer_length) != 12);
8471 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8472 lun_number) != 16);
8473 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8474 protocol_specific) != 24);
8475 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8476 error_index) != 27);
8477 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8478 cdb) != 32);
8479 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8480 sg_descriptors) != 64);
8481 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
8482 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
8483
8484 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8485 header.iu_type) != 0);
8486 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8487 header.iu_length) != 2);
8488 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8489 header.response_queue_id) != 4);
8490 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8491 header.work_area) != 6);
8492 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8493 request_id) != 8);
8494 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8495 nexus_id) != 12);
8496 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8497 buffer_length) != 16);
8498 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8499 data_encryption_key_index) != 22);
8500 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8501 encrypt_tweak_lower) != 24);
8502 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8503 encrypt_tweak_upper) != 28);
8504 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8505 cdb) != 32);
8506 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8507 error_index) != 48);
8508 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8509 num_sg_descriptors) != 50);
8510 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8511 cdb_length) != 51);
8512 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8513 lun_number) != 52);
8514 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8515 sg_descriptors) != 64);
8516 BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
8517 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
8518
8519 BUILD_BUG_ON(offsetof(struct pqi_io_response,
8520 header.iu_type) != 0);
8521 BUILD_BUG_ON(offsetof(struct pqi_io_response,
8522 header.iu_length) != 2);
8523 BUILD_BUG_ON(offsetof(struct pqi_io_response,
8524 request_id) != 8);
8525 BUILD_BUG_ON(offsetof(struct pqi_io_response,
8526 error_index) != 10);
8527
8528 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8529 header.iu_type) != 0);
8530 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8531 header.iu_length) != 2);
8532 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8533 header.response_queue_id) != 4);
8534 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8535 request_id) != 8);
8536 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8537 data.report_event_configuration.buffer_length) != 12);
8538 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8539 data.report_event_configuration.sg_descriptors) != 16);
8540 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8541 data.set_event_configuration.global_event_oq_id) != 10);
8542 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8543 data.set_event_configuration.buffer_length) != 12);
8544 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8545 data.set_event_configuration.sg_descriptors) != 16);
8546
8547 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
8548 max_inbound_iu_length) != 6);
8549 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
8550 max_outbound_iu_length) != 14);
8551 BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
8552
8553 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8554 data_length) != 0);
8555 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8556 iq_arbitration_priority_support_bitmask) != 8);
8557 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8558 maximum_aw_a) != 9);
8559 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8560 maximum_aw_b) != 10);
8561 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8562 maximum_aw_c) != 11);
8563 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8564 max_inbound_queues) != 16);
8565 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8566 max_elements_per_iq) != 18);
8567 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8568 max_iq_element_length) != 24);
8569 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8570 min_iq_element_length) != 26);
8571 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8572 max_outbound_queues) != 30);
8573 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8574 max_elements_per_oq) != 32);
8575 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8576 intr_coalescing_time_granularity) != 34);
8577 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8578 max_oq_element_length) != 36);
8579 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8580 min_oq_element_length) != 38);
8581 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8582 iu_layer_descriptors) != 64);
8583 BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
8584
8585 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
8586 event_type) != 0);
8587 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
8588 oq_id) != 2);
8589 BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);
8590
8591 BUILD_BUG_ON(offsetof(struct pqi_event_config,
8592 num_event_descriptors) != 2);
8593 BUILD_BUG_ON(offsetof(struct pqi_event_config,
8594 descriptors) != 4);
8595
Kevin Barnett061ef062017-05-03 18:53:05 -05008596 BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
8597 ARRAY_SIZE(pqi_supported_event_types));
8598
Kevin Barnett6c223762016-06-27 16:41:00 -05008599 BUILD_BUG_ON(offsetof(struct pqi_event_response,
8600 header.iu_type) != 0);
8601 BUILD_BUG_ON(offsetof(struct pqi_event_response,
8602 header.iu_length) != 2);
8603 BUILD_BUG_ON(offsetof(struct pqi_event_response,
8604 event_type) != 8);
8605 BUILD_BUG_ON(offsetof(struct pqi_event_response,
8606 event_id) != 10);
8607 BUILD_BUG_ON(offsetof(struct pqi_event_response,
8608 additional_event_id) != 12);
8609 BUILD_BUG_ON(offsetof(struct pqi_event_response,
8610 data) != 16);
8611 BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
8612
8613 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
8614 header.iu_type) != 0);
8615 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
8616 header.iu_length) != 2);
8617 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
8618 event_type) != 8);
8619 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
8620 event_id) != 10);
8621 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
8622 additional_event_id) != 12);
8623 BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
8624
8625 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
8626 header.iu_type) != 0);
8627 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
8628 header.iu_length) != 2);
8629 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
8630 request_id) != 8);
8631 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
8632 nexus_id) != 10);
8633 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
8634 lun_number) != 16);
8635 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
8636 protocol_specific) != 24);
8637 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
8638 outbound_queue_id_to_manage) != 26);
8639 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
8640 request_id_to_manage) != 28);
8641 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
8642 task_management_function) != 30);
8643 BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);
8644
8645 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
8646 header.iu_type) != 0);
8647 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
8648 header.iu_length) != 2);
8649 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
8650 request_id) != 8);
8651 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
8652 nexus_id) != 10);
8653 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
8654 additional_response_info) != 12);
8655 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
8656 response_code) != 15);
8657 BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
8658
8659 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
8660 configured_logical_drive_count) != 0);
8661 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
8662 configuration_signature) != 1);
8663 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
8664 firmware_version) != 5);
8665 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
8666 extended_logical_unit_count) != 154);
8667 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
8668 firmware_build_number) != 190);
8669 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
8670 controller_mode) != 292);
8671
Kevin Barnett1be42f42017-05-03 18:53:42 -05008672 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
8673 phys_bay_in_box) != 115);
8674 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
8675 device_type) != 120);
8676 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
8677 redundant_path_present_map) != 1736);
8678 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
8679 active_path_number) != 1738);
8680 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
8681 alternate_paths_phys_connector) != 1739);
8682 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
8683 alternate_paths_phys_box_on_port) != 1755);
8684 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
8685 current_queue_depth_limit) != 1796);
8686 BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);
8687
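	/*
	 * Global sanity limits: admin queue element counts must fit in the
	 * single-byte num_elements registers checked above, and every queue
	 * element length must be a multiple of
	 * PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT (operational elements are also
	 * capped at 1048560 bytes).
	 */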
Kevin Barnett6c223762016-06-27 16:41:00 -05008688 BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
8689 BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
8690 BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
8691 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
8692 BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
8693 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
8694 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
8695 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
8696 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
8697 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
8698 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
8699 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
8700
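	/*
	 * The slots reserved for the driver's internal requests must leave
	 * at least one slot free for regular I/O, both normally and in the
	 * smaller kdump configuration.
	 */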
8701 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
Kevin Barnettd727a772017-05-03 18:54:25 -05008702 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
8703 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);
Kevin Barnett6c223762016-06-27 16:41:00 -05008704}